[Binary artifact: this section is a tar archive of Zuul CI job output rendered as unreadable compressed bytes. The only recoverable information is the archive member listing from the tar headers:

  var/home/core/zuul-output/                     (directory, owner core:core)
  var/home/core/zuul-output/logs/                (directory, owner core:core)
  var/home/core/zuul-output/logs/kubelet.log.gz  (gzip-compressed kubelet log)

The gzip-compressed contents of kubelet.log.gz cannot be reconstructed as text.]
&ԮR+a}[6-r-O^:8~4\(ոb屁؂MftXvI dvns,pbK/Vi@y4cg Az$5Y1]e,aET&+'L.WHbV *UР\t9\V2г"==d:%d[\wFPc}`ydIvF"$<2C=2A?\TɩMBAE1XHY-OPܰJ) |DP;N vkYCzhGU{w+]l%\۬;82p֖Եro( ގ+8n dDȸ*)Kw4arstVnŷ^˲yZ|߲a}vaTrttN%E*iҒ!roY$]YmeL JP/2zn8\ǨRK <*xsv9bJٮ8L2qTҾSwcp kk|uhX|`*ENZd"٠H h72_]ISx 4ü3gݳ M] bެ}B 7vI=_ҺਂˁhIFyNWZYg >e :9V:UmըY5`2tRHpI;orЁT|4:XXbT% _JqAJr2p!k/}We&&v!WגqPۅʥ,Z]'B$jZtV⣾|n P0cʉ\cD+T wVm#O-.Ѷ:D(mKY,qḮ$ȄhsY`KALVS$Y dpUՔϨU`CL )@.2KəeN(WA tkkFfV۪{|5ֶ,Lnzwﺽ^wDn$m\2O uյ/vKȠc.&iGZWԺi]M[os0fOe?l^ͺCjYQyݻݴzk󙱲CK-7pw|˞'}ӯ-'Ofn6/ͺekΧ?omh7hҷfoĮ-ΕBDco?М /k5.=RZ ,YI*'Tt͖>Xm>ڱB<7iCqj>~IT_z4;2_֓\ *`BH;P&@*.9V#d".`͌UHJc.C qυ`I6HP'RD99ڮ9; ڸn~Z(NJT+xx3Myx73d y[5ikY Ҿ!r]Q*Ș : $G":"Hmh9/I,e/K{>X$'P'wϑFGa9-:PYy =1j B[9({,?)/;f]a] !KI*hdfN90ZJ7>z,%/_RM0Lotd2 (# 򑐊I9b,GC?rH2R9(LWF 3&0\ SIDc1jF&+rvº+λP\TǏūQGY8cgo*w/LSZOtiKbƯ2DrazKɏ؛J3HP {^ }!Y|_`'ܒ!PSLp%'(2嗓 ^jl =ЮN0xdN2\3PкYRe`R\__$DBk/Gһ[ԯߖ,ݝ}TqRYЪT8;!?UjF Ru-9فy[ӬuoڼUɏ77oCby-m/ӵnmDpͤNbq/q`i7z$PGjF4@5Y~!M8VbB_'&5֎*Qliޝ\:.82(o^S S6tiO:?>mߑ8?C7~_>pa?~ i L|'M$l~~<o}hƛ--0mz໌ڜrø.1mmB $O7oq,W1^]d;O9pӚ XH'+D64f?Y5UO!%*^ZXČrP@xo PN/Բ}>ZE_AWl$%c@ 'm1Cf q}d.7W6sC JIvC{21zAuEAAk7Od;Cv6rM{_D hmuZ9۷1c G NAzvOSl99%!6G,ݛt\Z_r$~, D#yn\eQ䔫2BH#BMG~3IQ!#flf7ɓ 1&epmΖ84Y%dN|Ў䎄TBM )e/LEf))`]8i\c,'rgGZNH 2<&[}ͫe?~98;"^c0Hȉ9E4@`jTfFC@j/!8b ")TQ90-Ҁ,pmhkH%auABL, ?}wUW4 XՔ쎤}dš}vT^:ՂT>5Fh}"𒓌 PJ O F%fB)b0xxl3.Bxo Zx;:w˸]g\"z'(~[4<^^" zo[uq0?SgJ1Fz{p2mHp wTy% M|Gu3mRܠi0>7bH=<,&[QH I%W# ̅#rQYv\9I;Xw|*)Wd k2%֘z߼$ }Lɖa!ɸ*Y %Ĉ[.KHYBnӺ G_ZWg T,]م{r!ܨiSA%_9Ee3)eZ;',X}O}'! 0ڡ\L2|L9 xEdKEIGmb:2}fe9;:tGz4rLS=|>?͈x8z5;nVTŴRƿCۯ*K*E4!$9hr5g*!~:|u .AJ'h "/i&EKQ! 65q'xv<"uQt[<< Ls7zMF+8z"6)DZlPuSຏeߟ/:u"h8}aކ,6&&muJ:tr9gi4=֞T野[y4L_3*h]GmD= mvefw'%Ԇ/ Fd"- g]gpy7| +=K.fP=2GSuڷ96htE21zǺFuX&QtC?0\)SŻɤϋ?PRGmbTif0QzԵ{Y /ӋuYZqجͫ߯a@j\΂3A{jzʤ+yXܚ1s=>D3.wK=PE au+ͭu_@6]jΪե-^73&"yR~N3.>L?&ð/ؿ?zlϝ;=̺bü$d-gRgՒ}G١Te.gϚEuH BBl6=Ez;y-Ϧpmf $OU SWȬ]+C~\;/j1.oƓaoT%xUQTL)efr.f]M2uG Ecٟ; gO0]C|\`E _Wns.`9Vn̐Ve$"_jY"MX}fb!D-η_XhȾs [cR^iGjd>9HysQ 9Nʮ$D26Kf8?.޹[ЕOC}xO^**w^w \vzl>z+a}}AA;xW5Ehܪr^9"S73ӜqəB1zV0%hRzb柌;* Ut12=ogL/M: :>^\qkИ5[: RKhC("dK<+F:禦k3LZ;UW/Q+@Eeڡ5;˜\YRMaNr1}􍊇 ,#oT`B6*z H \%q =J2qpWO#+X"u4p%X*IUR\=As%@` ĕh`u'I•0np~]IZ އQ1&3=S)(NïʕUggw+] O))j$^Њ\ĥX7Iod,>I#4 \_e PlM@W^Uީ(>CvV|$TI n=к_+m]훩H~kI{(9QGMVGi|6`¶Ul{cG:3sqfPgKgp|X@Ƃ˳:#@wi(4wCo6! n'dm8Odb(Ua*J 쬃#Nag) (2LTN(S=ˮjow$sN휺W [*;3DTn}UdsU՟g]^ 4^գ<+u^&BxOfRd8Ibݹ03%UgL1.OprNXs%o|c5@:ok,,`07g-=dIv26Zcw9EgB)DSD0vlߥ˧ݰf^z"kT+qD[\2$lc M;k$:E%>ɭP0W_u4_7o 63=5Yx릈[~qwp <ZIKe }Z+pFrӛ i8{%# 둳x%׿xR(h5G^&qѸֆ$:z)I4F$#(O7=߽|yV{πNU@;z^O0Գ)7x5g3Il4a=hOg8ff"zJ'.Tq#gnRu@w0m*RqXtWruX2tie$XLjAru|:V*fڤca`k$F!8P=tHbaBE?1J 2yT}yݕkҝ8D<+L;%HmCZ"Ry'(Vl}ziޛGW .fޖFUQ6;BVi΍:3L#z2; He:aU& wK^jXFJuT:a۪kwNA-d|pR?ߍiK?$][}teRxbt23m^t4/?^=nXmn=7-U+<\+.v Ӏj]7]pm-oOj8E_,>ӯB.<8{#r(e ;j ڬ9RPf1)/"us0×!qIY|ԦgJnNJVz"_^~liX$UljteWiC8nD?E(U:e6I\Keߋ@sŸuAgLlal5"䳍fYD1/A8WA^zx!A6oPTQ|{I#|¶F#LjFE ?%5b<ۜ5rÒmt^m$^*)r[00*C+Q04h^JbdɌQ 1P{.806d+?';K3-\-$Z},Ys+*QmDN򹃿]Vl{zW$(-NB\#J vt.Saf}}Uc1zsh9,Rg2DO"Q!)nbQ ƑZX\ۓlݛ60 4>ttԺ8cV5+ڶ։>U1+g}͔ݓIy"z!zЉ<8'w308yޱˉ}]4ѥݜFGVR& 1A}0;JĨ6xΨjgϡˑ5ht ^g~?QG0 (b?X2'ԮjPix`,(U2xi23Ń./1m^S-CnK N1 4FK*ZF[dE2"x& CJ#68R$#Y@SJx->NK)=2/lWSJO3 D_ҶOn'8Syhx:Z~u>3yĄ@u)%>&r\gښȍ_a%'EUxl9ÉKKCbD\%?&qHJJ4v٢8q51ps;vJg9_^RƇ-& 3)(C"4)v Od).9QHBhy,I- KC$EclNO Z=tm5&sp%{@&0q;a<: 8{fx}mw>zkȲrZ6h@ɵ&LD+ Q]!K&2F"{Mpol1KPVSQ[%i<>i5p4A LЮ>$84dyT2^i}$2-` _?V.hR NjI.P3TULfu'"1\ "HVnUÒzUb|lzW-؇A q5ek:Ǩ=uΝX;Suħ=! 
я =*4ٺ;7 曫dBv?|̃b #Vp͗` Elh8.w;~x [f_% wBybםq ̭F8pQDM.neMRqsTa]LER L:<ʼnWǸ7Dpo^~oDu[ud+oL0]?j%BI"|鬾\ZA|}k?֯a%βgKa0_l}oިa~f||-m-LmcDtmw^h Ľ&`XQj8̼,RZ[wlma2}76$GYd^"O3W(JO`el׆ࡍqTS\POh@Uȧ W\&-- EI;R)@Pp8O&[ όQfnCDė}/c. e ˍ&<880fq4_qU!VyQjF.eU)j<ػ6/iؕݘ5ݎyKXlOcOϠ?uE+\"*G3F\V;dFyKͨ /8HxaۥS򹖚򹗚Bʹi]DSDZWqW6Tz=&\hDhKIf"IR|&AUI !#zj1!15cZ!NMَVZC.Ɗc 篎l'B' !s$2R}訨h2N:[uzrDO_3LI#4`ޑ6N(9S4R xk/2*f|k$A>9B!8ՅeQS 'SmnB8ã& ADBb,)R`aI%A$"SdHI{kE(!j %ՉC-J.Ҟocx客x|v8ցwJ8sxk5+wp:P)v<2~e W얬YxI'ZmVvrhX ]ι^Bض~i L]?.7gotf[SFnj{9/oϼB;伔rx =|uQ]v1왾%j~ֽwj|hΪ?Km3yp9憉`m"+ߙyyIОg~e]m0)Ʉ J=JU(~p)X ŁC[.s=.&*C@?x}*cx2H.&uSJ!6!zM"h|%ZF%ykqM^ -5+3N@ <\iu$1dޭAKdeSsҁi-y BKnCX!5:[ˬZ(3P}؍r-zeeG#HN&4OJK#.D,Bhgإ$F*v?RAWpobN:_s{:~VN_ HHAhy!ЎbBΝdg_qD .aPNѐi9ȍp{G #٢z6i'pT`=EM$<.6^\Moݺ9#Sz1зrn;CW2Em?~[Nsrr<ī)(Nf Io|vJEW5AVyF8}u3U2 R*ug\MΫ/>.(+bX\n?gӶvѶKDդo޸= +iG$.a,Ϩ>a V̳߻5xQf;P9kjKedsJ֕ZWW}祎!t$,qe0 91t1=_T *N3;;<{4G>ӧۗN?||J9珧3&奉:4}Prʓ|ഘ5~;q j~9 UT+Iv.USM5ČvP Ax,.kȦ6lU40* ERZHƄ$[xrQ`hT.A\"I(s>zb8umIϽ0yE>q"p6CgR B) #!OZY±l,z0&:ar5|%$|FCccbeNA|-O9yG`XU/ */An]6)PB hk"XZ}Bp (1IL8@z< ;LIz+J[HBJ M*Bɛ] ᤉ9`2l!H9|=uȳ+.]te[ĕ@c4ޤ*WJ⬐.y!0/`2RcF~dEh }Hw8%o8.2s8qč-w8>h! AL"3pSLuczy^M͎&Fet'Vd3~ ~1Zl_S1e,3ZA +C օ w`# 1V(dy a`Z$ *GpGm`*Q6 p&q.0lL=Fޅb88sGJ.+[`ֵ*yHO,<'L=_lQF5{%:F\ YNMPQDG`LxgXJ:28l@}2!Npd2(PR a nLg=ΤAuoWWfbJ`ShW/\-.^M2agn=\?\GaS]AK,op<=թ47,B -:{@>ݫCNsU J n|6 Sg|j?X * ؠ~a't!KqfN~B'?B9,#-T@l᥷y\y 6(wᏳƼY\_KcVSzᏳ1p&qyP͇?\LJ8~)øpW(`l8.bI+mީ;.<رx? ҃WI`G,*d J瘄(0#Me 50kCtL혵eG,3Nm>l^tÛYYݵOI_Kg w9HzTD()dȥ{dbQh+E""\ 0 $fO,EKFrRר^r'c5j lߌahL톡o-z G1]z=F$w#LVL=PD9RKUJkӿ.2;Ѩ?(3Oq]r5JJ@ ,̮.YZ hN [0I˭ FؚZZfyzIx3A+JQA 6ę(e+ƒZF,O.?s5A [W'& E!*bʨ6.& 7BuN*wƴ Sҗʺ -jtvWp`hPEM~覻 ^jˆgT!w:xs *Be٩F*4%x4"o٘虵&گijcU꥝?>kS|5^P)St3Aw\q?^mq=^6Nq- DvW=2YK2yݖNF}댯JOWSxM./̼dP'sA/NV'xu84-<{tX9|=;p:F}mLwt?tVt r_F(vq?V R)[N7k/2oGo~%2j?LJ^O~s^,|;$=/$ß~TS}ȨWW7Q e vk[tj7r{yz<iiaTkXL;=>*W[JfnqKvE)2wS}y]W!}ω6s&OjBF>Qe?Vl~OS!~d2Rv8,/N$'G5ģ?hƁk_3F5?u^vR=h%L'٬^o>6zǿ͒C&4jvyZٯa{FC%gx"oWK䲺~ۤ1Km0KоwOlZnOaZn}qs~xvH>IHӋ|t>@w ԇH+7t jDž6~}.[_1:Hκ6*JӠ]L :gV.dUSrE[`%RuDf /oѠh yo_dunާD 9XK)pQҕ䝶fGmz!'Xg0s%i#1r@^ݹ%IEvFGzxZ\qy =OYIyzGzBvF;䀘"~y&+) %_c>Çx$LŀK[Vt ?C^\EOU*&#*-}\^ާX'W{H`_³rzK/aSԂ#KMB`JQ7cu!{u[E12^_7Mg= ^x:|N|kk3f.m{KdקZE^/)٥E uXm,iwsgW&n?~+Y'{Y|FmYP7Vnih(|8kbU@wgn/sHR HaoD9dx$kt` q:Uo 'N+^ɸrW9 keyh)L_%ToV/+HG'1ץ`zE{5`#cTb\뤼uG# |0[ݵf&47j̤ф:kUP))\MH9VjHe45&Vw7w.V]i-ZVY*zZQ;״mH]C4N@لYkѴlc\N߿]WsԻfcuVe)k3ooڥT1 ײpAA}M#эݘ͠ݗ66{`t`8SR*Lǣ5Eg+-JphQJ %}IYg,Tx2; ˞o\:Qm3;%' Bŭ{0+)x!>{Z]F|T q*[VL6[i/A:fxyJJ9XBx%6(qs-QU4 Z'5pN5tEon#@Gma`}FG##e E?Xr!lZG0ڌMȕ@jTxAۡyR,;U).hCgTW)JXnƢ9% ̔)TXUTXڲZnB IjCvYwiN2[0*Vޅ耔#u[ݫ N7p QPEi簡nlg썋P7`0Q\nՃWd {hluı&6b+Cs"': qiygu}]ə^ў®\ * pEe6!J&e+)JUA . 'м Pc!3"yjN$\ 2ӪC5Q@q658d8o#X H)/:Y`H#P܎m@]BP& :jMUĝWmd-A$ Sƚb|?ns-?;F,M%O枊X6'C 4 X;KM)G?PmD-\FmAnB0a%|-Ru ٖp @`5HWP!.P`s:!mPGrNgyt'CȠ xd.dҼ63$@fjrZ%=2@~ЃRA*!o ThVa,49뮒|p4,;R$'JF?X-#C[ل$EP h2+IPju]{yVUA jP@ >``}P_]fKZZ0LɃ!_[`O!KƒGvoNatY-6\cI $y MA|Ʀf=)[Z$\ai ΝT^$FB#ZI%A'k6~) -1>P{2j1h7%<%2`rX_Sz'ȍm5sMvhP"˕<d*q:C:t3%fdi3Ɓ)vf=^8T W"$花vkaV97{Lu zR'ƈ,*FZQ"Fbqg= 3t heD&1 QٔЏQ5ӒoRhQ35y顱f=ش=ֲcitrd2bpU!9'͉|ǎAqp42\5Ƀ7_@*j؝-ZJ7VPP``rli =A+Bjz~)Hg5>T2'sĮw+|곩\,*w7 r`e. 
;fEY 6) " ˎN hZ&!KkB&*?u+y*"8(ewT/-UoӋ'q[-Cn],ZݔZ׿¡cѮP73teη..r{}ŭ['W\.۹T_4;k땭9.(q +^*Y1M+~O|A_TPKU_7EIRx5ou9X l~A&IcE(Vw6.Mc $KZfW Ay}ww(ݵnsٲߚ]$AFym z,i7d%j`GYk2 Du 9#,3:̜]gTyy{-_[cQ8Œ T*lPq|:ib9B򔇆u9j^<-AK۹bί'ϥ6ݳ67!'/E[dnk2i[ܸL‡`vlrJ9el䘠@HƜT,xCw5CqĜҁ8̜qWƼx(aoPC|SؖTSvܺ"^TS7⼃қQ*^R^HBP]brNBm)!WjI%=ջv|y|e^WfT Sp vEmR $.{#gvܳjGrޫ.g3WJ)L)Mp^0Xc0H+>v{6/P&ss~VmÛ3a9]q1 \Ǘ~ƶll ie3%G`XLɑ09&GH #ar$Lɑ09&GH #ar$Lɑ09&GH #ar$Lɑ09&GH #ar$Lɑ09&GH #ar$Lɑ09&GH #arG%x9x.S/8"PrLxǦ^qyw%P`j_>s\ GAˮ6 9r5X?;HxT/1K 7#Z=Q12x̖y&eyMw8 T68gޝ=>bٖ̐I"1yv\x֤02w9A-oloSs7W>ܛ2Cݲ0\Q=:QqCP6hkm,os5e2n$-s;=' evC{HUn%Ϭ[8%2>h)60I2d/].csቌIi yqeUEޣ:ۥ7Ki>|ǥ&w!W%8mv^buwSyܳ盝"S e;$hIKDih%a:Vb8VzΎ#K)3R`)x}#^G>x}#^G>x}#^G>x}#^G>x}#^G>gҏ{w@-:𡞸iԟ"L}@hoj|-U5_.n|/&FC@o{#W:[K@.?o+L44n~> N]MOj`|\pg>ܟRuW籝}H'׮RSMw +B,N)U귡Y{7tOC3nRi7`w/~=? H]s[ZhWq[}6X(5>lbԀؚAOЏ&Q`7LAufU*^_.>;5뀛p~[SB(t|'jk4?5aRUNTCK ލ:~f`eg;A}-Q.o"50/(ݩ E-=u@vjk glU3l *tb/ y ض8BV"z?qsD*K?Y<ܟ7LΓME_UMcћ4y{9X3ijsŸ0iAO=}їĭTXĭTWĭVCOVj&q=&qD{U+Є,M~/?d9vÆz>e5F˼;WhϢub\Vڂ\[k x?OٲaXZ|&p Re=2U ý+-* 1YÚUSe6v$r ),M7A2aѺ\\ƨX$LlVN*|q2&ӅuL;/yYח䋧N^2f~&&HGUdA%ˍ&<8쨠h6(Ư*Ǹ aj nhv7n|r)NOhEŮZo}Kb'K\R$"ɥH.Er)K\R$"ɥH.Er)K\R$"ɥH.Er)K\R$"ɥH.Er)K\R$"ɥH.Er)K\R$"ɥH.Er` ٻۦx.i/E0̢)4jmlj!r!pJy1 pJm9tN"p\G_̷ڏm(ոvY8oY̋k0}흇@%^Xd^fZ:RXiv,mUGcZmUMۯOueZ } Q;r`RSI9Tdm, oma(4jZѴ:;֋k&G//5hjDn{%6Hrh{ѭ-3<=x,,+XtEg9 3Nd9U Ug}qqu$fǶmg=wxv(踧\ 4Xwl$ '3 gb%$#< Bڴs5ŧU+4;^v#?>x,WF2Ƣ2Ӡӳir{۶K#U3l~^C[4yjt]i޽tuy)de;15v~,XVas~3o5Ɇl&u_\u\6|LYUB)jaʴqLh5BP>vZؠ `:zo?7)3_7?|+&)pUM\?w_p;\ڲ>z\y׽?|ҠmAX,]էo`JƧË4{Q^ܮY+XKތW0/aqfEZiXd B {W^@H>h_d-+Agyu}!틍Ѫ,lf/FZ ɘz.*aTT*S\Hɔ9=1@Fz͆WCpxw0&|LT(=+iL3'Vλ l<g7;fG;7]v*tyU8/M&l[gMdrgz2<=ب% י%V Nbmu09lQ7/'ԙn\]&gU.hpR {t/\t}x#?bee}OX>Ȋ\ OTr)<OUfT80ԪZ64r;w]~l6u!_J!l4"])^d麙p`~^|_[qPKRl$/-+I7.(IRK،[xXv^dss~z9A0BQRJ$eK CdyH"E*T.\M.kQbJSIJj)0/LKI1J HJ18U fze>xe+!o\&Siv}mٿn=JgFZ]o9W{H'|? v,psAPdfd+Y ߷-9C"S4 XVw5U'Eg*+t@CpѠBU58zsp|W{x. I@J7585FqB.=ZiZni٧x[g0ffvG-rr>^!ezN_[lћg5W3K&hLBlTrlوl {?]88UE9m#_SoVHs4Yɀ9m$ r-m**儴Yн߼٥~a )℮=?GN#t;ƵWO},o>q~x&!\*z <|, 0rM('X2 Q#bE؋E(}yэ{0 ul(ԟ11e q` 6XR%h!D[ʒυ%DSѥ|= $*a"kGxTga ogd.|FH>َ.wZAglE_/Qa^yyS2ΧC{6<_-G4*)yaL \CH&DE*H- dz{ӳk<fxYe h ot ۜ t>Rݡ/P!M;?h][:Y:9=XeZAOFh`g.pyJ, ?=ZDRe uaG{&xRt&Gg78m‘TGcӠ,H^͎~{5@qّN/ta`1r; &Moo χ8xb𑀬S# 5P,}::|?󝾛5GJ͚CG4p}zu'}wFK-(K JVˣ>o!`ϐr5f;Ssm^H+SO'a|%h9/u?V3`|cJ߱S  1AmOx҂;^8t[2^lYx7Ph¨O5Wk']A:?8$Ŭ{Z>_F[@[ruP*$gh7ːXmL$M@)rׄC9XS&0-9)/uʜF FzΘgdL,~}D*|9%Ikk&1B">2gZq hy)e7zU^17M_oMV^ݡ0eE\xR_qJ >s29\dN/^|eis+ԫSLNJ/fۗ^~lU|ҝK1e%od2}' {ȴt7q&s.Y&E+a$+wiW&HC%q._|leuO.RkiGTK; P! 
ZKHnũן tFh{:p6,B.hE 4ŏG~͏ /jytpߜͧYg|^8f*bPfޫ.f?Po̧aH^(- *ݮkTJ0 tNiM*7*2|!*lr=Qcmz5@>c(KQ@8fI t|R,3nYrj k{WPd}1Rx?{?+`xs#Y4j--F3} Tz.}}?>Z] L]hYn(pEcنiݷI١{i~8$;ɲ%t4A6c!W<1k'$ "YE;2lS1)aP\J%.2C6sZ.:SM-mn5:P( 7:5S8Fi 2ҠTֺңf)>guܓb̂7ҀcIּla*I1@Jb9Me.VVjlVt\f̛)4-~0-deZ?9]+$Ϧj7E=Y;㵗^]۝-H WU9N#D!SN`Zd@lacхlxkӫm8T`e8YrA'E=)&$t9+ 78 2ږ8-c=RVB]=":l^`; XY\TB&oGWn32K^kyέs\J%K&Hdˁ4erCtJ'f(`2!E1hT0 h3v,OʛSfѺڅ1kbW\CzǮVVڶ=!yM[$2$Nu !mfN*rdm,4+ rE^tɒĢIh8HEruN2CGo{8b2FU97,Jr!)@'ji d}qY"z3Vo4 ೱ\,";0DI[\I e%aK[eXM%J@vql>jdW+E.Zp# F(mmĿ.7N$eLZ`{x(Z8{w{0a:: v8(si6d9~|GmVգۡ0Dh?8S#1xsʪQECj\بJr︧xG{L3︓,1Q`PXK8$!B7 @.fk9]wg+KOr}.yyR#A5T4W (0^ \@g9] (wNNmy.÷`a!dfQ7vdZ2z5KY1=^} jp:^By\*bs2 @;$AVI  l} ݁'ݩZ`(=&iBq&|BZ 2!{{&@g9gfD] פ^Ԥ>,;ؗ1<ːhCQ z$HPp8A:) ’kR*7{-`*kW`Aq~oO1ovhأAGKm#*rC[MׯP5xЋܐ7Xpy}gn9Ta_y5hA[lл(Ԧ 9mV _sF8NQ_UX~'?TGHfY6@{5sg֕dP>bp2#% 5d̄(#, pJ )u2io0g kWg kz@^P5EalhH׹Û3T6Ir0A%"c"$ 7h3idTJB*0ڥYZ񤬵^*.B }YF 3&ޕ65r$~>1fw߰glf`t HC3:*2+3+[L)FXqD/=Кse;O|ȸU/.L럹[8}`~ćÕr?a@/yIS3dg$CָSb B  ?8q;C+\2aX!Lj`)"#tp*S 0ysm N[ 5!O I+qbW0n}w앜I"cq<O3<\e#S D>-zק뛯r 2Պ (JAtΩd޵qc YB\QoFY]<Es}hQ1KЌ^O s/zV-cɊatR oBZZx[b|yK!+u,N_i}(|LVa1 tyf?Y9XkVJVg\ꪾ*xvtjŒGRϷñKUOː c!>ٰިnTPW/˙`: ɻo/'[Xq#0 .]Eď&A;ioiho4Ul5ìoӮn+ڽ>ji+cKn@g?M/~i`AWG#[X3?6Ce**MEvNQ8m@s9Z?y"(ᾪUgs|Dr#Z6q/ AH^J aa-x<H201xIEʳN WcER0pg#=a&{>aǓY0fXKӁ  HxqtpF)rXimPOt{:)d: mЇg}-'d)[NAKԪ.e "B[vU6)\@KX%%2aBh#E$|?X[='] yQPFbvBm!".qer "%1XNtHD]p k 716~YȮt+oC"Y( KK'X|#0ȕ{o/}l>Sb3p~zGs ZPearw4Ҵ@eJK7YKV-I8z׳j%*j¬Z 88\0s,G62%k/9v3|%gS4aw/h-=BG7ᛍo6c6%{vfRY/drYi*=>0Ht4i0JDR:gфj`X!beT;;n#ۡZ;{)'X #7r݉SXaQ!k%F$t~CB>#਒K}E'/so0Hrm0autU>:TL0wKLAz}6eC[P?ror͙ͬ$W1\sBF)$:O>È=)vBĞf B`kC Kw8OkN{\PHid'aR/G"XV$%g$EV 12@plG.)'܇qcCnW͡v!3FfN2 Bϩ^~t*{О{V!Hk40KIkHcx"x1TYgr3h5Z`(3*h2:ڀmT`0HSEL(mY_ Ju~mo O[ܦVe2B-b!u?Q= VPPP2\s 0lQ(Bّpr{GiF!Ώ Jxt@ϸUR`` Kr긘gL*PMCwvSբǠo:Հ r-ya%]9ZZ{/-/ dzψBg5֘։Ȑ$`uHi4)WaZ C9$“H4i N0`yiI1CT8WV ,-'(11'Nlv`{WDMc ; 4UJv@WSN6 < . DS%s*dňހ!d )TgiG Ǔ3߉<_ "2Iц +0~ QƠ[]SAHwɓuiޟ:ԉ;%s1eC*Jȩ^XFutaU*XUTjMqNzjg?P2{)Em[v}BD/!7Vش/]yR)Ac;ŘH1dAt}dAyKV1N_:,ye闊sV{]QN)0iJ:E@k7aõrpb 4x_%ʨSVN+Ӛ|'6BP'3WtNip Ng"O ?35Eޅ.ذw3| i5W)A;U#?dFU:2a ҆9N ߎb۟}fS+zơۧd:͛OPd)<ԥ1\ ؛8 to[:^Wfť~W7,@\BV访T9rѵJ3W!`yiFKp!3" i8軫;Ndd}BJTaϾf 61AL:;k](Q7'_HCH3ִ YREh8aP_JNk\!LmPU!7R$WTk 5%%%ky>1è̚Ҽ7wb"F>6_yp/?1d+%q:gO'i4bڛ[j~&cFԀIW V[@+2~;mjL IYO;s%{ϝv. " OGy'g19em_x9mϢPusC\)ĄEX,طJZcӔApVPbGzcDˋ jiPW xc[jāsc+D#CRKD go^b<5z4]<0)Vu}67n֮#iw+]*8P]pee(#wI:9MCuzo}Ib$*P~A.=P.x3j9/N{тxDsCFXx?d}hz{x1) oi%#1 KFL'rgJʝӉJ݉W) 4\FcJSx;[ )H @- S6,5A /&ݝt$eL^UYyUjP~ǢBZZUUuU;aD +jCWP] ҧvY+@9nՈ0'6t2]Jh:]JXWIWie +kCW .urVuJ(%iJ!.Wi 28pj onOrOͦq;O^Mb|>MJ6(wv5s"|}o&BE# +vb~ٴu{{źilpֽp՛LtKyHn53K\ZN$*iBhoTvôǽuiD%RM>p>RPWo[7_7Cml6f2z]0Gi߄ϣEPi]kkKT+mv2kA)k8CTژj "u1-AZBIDc]ƹNX'nj\O xJ(lJ̑]`xm JQ@8:]JMJ\CT# UKkc]%I\ d ]] ]gq`%jCWUB)gUBgJ#;ZufR) U[7o'%f1V^!†O w2EAbR2-cA+A\%d5rzGR|y LqY̘C:,Ncʠ_ξ#pBƹȓ˦Q8%=Hl6mK^ri $cLK21]RQ վt(cKE]`Jxm*eVUBD" s?;WɺUz%U.Չ@"kCW#ZJhI孫.RYc) ]%ՅZ%NWD,kDW 3Įĕ6%ҕ\:={e{ֽ22P&vut%fՈ0?'}QtʇJ@0N QȠCՅ4TL(%m"銳RlJ[38èEHt oyuݙ*N,6BU 3[g-\OB)J95&pcM`%5jAKU}3$ TJh:]%gyt%(gz۱t9O , %t(n".$Ոԉ0F6tQJhZOW hJ1eA(6tj]UBhCWHW ^; &Ybe (8or@ Tֈ0E64VNhoU&]$M +V,ʇZ>ab#=Х]Ͷ~xtr,kwڡkVfraE#ZzERa5%9E.'(9[;^$ǝ W9v W8lhdz}hޤ߻YoT(f(1xf2I1I"}/j^_`0ìvnQ?yd.asi&IPOVTh߂V$pU ;7!~֟.ݷ;_h)pܧy)bLI ibSI!֠;h!Z?: Wz9!$? 
Eo\t3^ֳk052(wn6>˷ӷzcwHlf܁S'yx/O3ތnaNê^}\0-f~S2(vC6 ;틥Cvbܗ{Zi};BMcZCXwy5 mUM [V{uS-*@Zt4D|ᮖv&m8~$UWcm9m}Hw˗Z`XQ^ln)ކi#JabA3&^2~pwuDHa:3)W~'0Bf 0d#"ᷠo8:q$2XbP`a WTP%3c8QQGGXH>I)1xޒ.Y~憘%0l9qt28Gۆ?Q4YvOkcq Kr._m ?MÞBNMDv(A_QX)=[ofޛilrK!gspXw mo7 q&>ML2Ӈq ^0EF3fXA tjs{5&:g$`&y`$aVXPN eRanPRNb'5(@QmGsBM:2,"1rbrX+?,xjdp6YJ{K;;M@2GJ)R#<~ hݬ\IV;W+Μ{Z}BQŶvI=xX# >M=ų&T ebiY)EQl+a {|5Х 8G{2N%~.U&iXQgr`jZxi^'!Nh ]62o]uw1# P:yS(zJevG={4 d%_WÕϝ=j.-pG};jC2Bgy+gsOZuݢJ\u3@qyvbdptlZ!Sl$$=J/vo1377M:eW-r79yJgY>$XY!Q /-8PP! X*$H64rj  y(/:\\N.X' 1ڠ@4:&aQiƝV-X!gVdlJ;t={Bg,;u=B4"5B)`9J,XSǜЈY?URM,$F ހ%ӌqRkM2`13~Qf!v!z:i`0x:=ig#L"cI@2%j9 GKԃ ( &z~|RiF[b|sK7ۚ![om,,o`0#F1iɪMoskJV궾e5 >_˟fVnְ~TL\T[7q W_}:}/_yzo޿%h`Rho ~<>VTMC{b{4){=}էv/UZ3sK/ Jߵ}z9_&.mA}5GzвҚ[[~ fd7"G:* JvQ.=,yd}?x_/_P&Җ W>_?kM#>qBGjKh4^(@$\"ml"E) ;ذ,_=U1 อLǨ1ZBLhF{hB+ȾNoRysg:0|&=4p2g@{GH)zM1/ rSl<٩|!u:v6UMYv0^bO d ;[(,u&>fMY&;pPЙy*!QI!O!VhC my6+(ukLv)zbTB=w9ް=0!BPfA FC?!IezHMLc]moG+?2v0wܮ7%8r-1I/}gHHQe Kpfz)RqREVOP` ^Ug7[W0h'"$JgɣGt*)#NpzIo!H)b9$Z<ڨĀKCCjEzuФmrC,sR HUmijH0D  buyzui<_7J.@K+.(RNvi2܀КLױCұiGеJV,ɠ~:9EzF#8͛E@ FYY#Fo sSNQ(e;iʷN>>y5bzp8q ]|yW^,Gۿu9h2͟ȥݴx1o{|d1CeIt\q#`j#Mݫ۫/"86 j5\)o> 痀{vBl݆hZ'@w1[v\]s_w=z$%c^߅xBZˎ0~Zn9DC܂,6^;̎[{uh^l!G!O2r$zŜb Ȉч#(1畳Uצ ѐ.*a*e` Y];8]L!32צs)؜N'ߡyRndݦ'tY&,'1k}؇?K5 4ܥd j ֊5)%Y\4pg>x3c97T%cpg@/('N  T9P;85x#;ۦ/=Y;iosLѵW#EZȨGQɐzfI=:Grm_VaZ ֥qVN%gShLBlTr 1ӸFdk$O{i[*uҟO]|J{SHs4KT5 FR-uRNHE%4AxA|kmҏn&7W;MdRV y*2eKf0:\Kf!䵊a/0 Co0X$ugGt1));':0[Ђn-XR2h![R̅K'!=ZKS k0J!֦&n0l9+='kN|vKTxg[g|֔ pKRh&YqJD^ztŐڨ\zgLMV[/$pYu o#ۜ)h>&,8 Fl-OSz2$&xȖk,46l6q^%}Rq]Yd {lgReW^>o2آ5CC,PT;nW^aċ/~ ڱ$f)%N)(U dUYUr{ٯ; "eUZF fAdȖ'C0cȬ_qͳZqhy!O7zU^17ņVmlܑ m$G_犸mnp%P*'Ca&wЬy]]kR*f>= 4MI4J2lժΉ+B+~+OQs;3wvfMub |?zbR=VXVR Ty!fT(ǣy B:a|X65"GL=`J[: :/G(EDr͂W:dАiI *BJ!!Z*Qȁ{\ae2מ0& C% }HK&*,37d=̚ج[֘AFúG1b 7!fkqgy6chpU)¥F`J8r@>{qp=pig™ p 3&2(%edRR JH! ^TNg0n}Axٜx\GKdBj P"s&`,2ά&mjv Hș;]zL,5 M UYj jn'Ժ1=^}$*Us/WQ*PwXav$AVIlzL .@v?@V읞؞ddm`(=&iBq&|d6,@7>qg2 IxsfF}ã[;M~^SIwfgW"}r,1s#`$@sF)RLj"ΞTI&]RF!feʺWjsl>v}UJx}ňv_$E{ogdzYBšhoEON2T}I_ U=R7J$)syEU١%]*e vXr[cwR@o ^#Y:R륓^Lɐ!V2x%yG5vw f/&l}H!0ȓ\l2JXjSӑ7NR915"r:&qo@oӃMl*ϭ{VC*ޥ(uĠ1,,$D:3׊Sqʌ}=+?/tvKl%Rӵ>t?-bͥR{|lzvL_@e?7aD{V({?8bym24vh(_x|;W0EGQ:~+w_MH?HlOZgiWh~Rj4'h%3Ld*mqҥg$}gwu7KlDZTD L>r:ѾM<2~lhw|󟒳(a⾎M_uНh.zQ"wx#W? %EG^0({Tkw_<hOO姓3wsjv#5pvZ)d"Fg\4&J-LඏDynP6(?yA: ;zc^0͙;2WɰM)dt9pܧs!z920FOg>q2Ҡ *)kkNjF"x<9h*jR̒Y! 
>&ϚO gL HIZ:`- D**j5qv+j:.aT3(:M C7qrzjMrCi{mٔ,8_/4&mpSh6𝭆JAU;^#D!SN@`g-@lhaAђ6*%AEUh͈:fKuCB 6Fε TmXMݖVf B]낂i(xKC]X>m?|Єh~4|*ؙ@ %5I<9.%eON$B2Zi, 1 :Yc(^@$Mil4 B+ &ǔYv϶Z0Zy(V踫նv`oN(#8)b !5ЧT2dBcH1FCe $"dB\Qedk0hD.dhC=ddClA CGo{4b2yѪ97,Z%9 ) 4rQ)n53VQl4 ೱ\,";ѩdV3mB lqSu=Ƙ$ìcTHJZ7.DoU{uF=DLh8w׻JD,BU6uJ1MŎnCu6*BR҉JŤƎL9[ YILKp-<ܙuv;ž8}q||70a\-`\7Y0&k`re6匴}6l7Yx&k Zp) PmW>:#\`x6o.~)4Lv:B\)e 3AYBFWn1JkfWն"\\U.Wz:\V6}Q 6) @ji}W;qo%_άo¥ws|iE5>8}[ FA9#LH> ~׽d5@irߑ0*\RƩ[ !|82J*s8!oƸ}^\C d1,\A֛8C}?S |B, ۙB\_6L9p#b׽٧ܛfe_ˁekJ3 ӻޜR m)8%8 B*$gBl 6'o[O2 ܣZeTiǛV=#9HX#Lk\}Fj8 ] p;\m[@{ +PV jڎ+TX#Y@02\\Ms}Il[0j/V(S05S J PNhq%ad+9W(Wf]Zmڎ+TYpuA0<LZ>;T;L%A^v }6":rOR䅴I}l`ods-@-mE("R -JY" :/Ἄ.O^yO~VoqϫȃeXHBxQ̧'yYo[7A "-K0H%VuZ= וlԗ%3Bp (1hpv% Y$פ`J١7 h&W.jF*Yۮh.vٶ)D+L9W(W\pj m;(3WG+ƌ9  PƻBm0u 8gp% l+T{2T*q%-祉`Il>}W.\Zz\J;\!$7Jp5g Վjl;PWJ0݂-ě 6&\\Ce.Bjm;\P]}x+\\ Pmv:B\ab kO0r 5 {WRw]Lj+K ]nil @>pEzpi_|K  XP QkVcU.>JL>ĆOp''%74SهwЇo$X|$\q৙Z}t-;LW6xmSi%# k PU *Fڎ+TM#Sڜ+8 * Pmd1+xQpW(v\JI:\#4 68 jOh*#@"跟O@dt ŸR'~p|mOᯗp,z(M6*n%p6C}.ђW5X}6PN.Ga8_^vKAoR3yNFNNOɲ1|΀1<'K68^9UʊNXQe-;2*MZϓ^r(pTaz:VhQKT m_$SCD7aIwHK^{o䨵qRMlkڮZڲ<עm׫>6U"0lt6gjVUwI9 W xڮpr-WVRv\J;\!Bd+~Rq32\ZaFP۶YTJRk~\`O0rWֶ>QNW x[ W(wsz+Pˈl;P%WS;e hAtjqcew `(\Ni,+d~(xΨ5NkeW[Ԥe*9r2Yv*fC&qkx铿)oljêWn^#৽4{țU3e=TmZW h Pչ Zz\JE:\! |frWĝ U q%0W XS PƻBH!JETNlprWPK4WXEyFBZg+kj Um_W{Ꜽ+ W(dwjV"7SE#ĕʘFA5Ld0T"rU1RZغé(h )4ɷ5<Ql6LZ Rt^QbZ aIR.@QI[u Pr9*VwcApUTW<\oWtr]W,Ĺִ$JoP2thBSxP,҆$ %֤șJY7TA 4|p_\yh_CT |V=eRQP  @Bp1r > PmTsjW3Bt6B+P>Wm[sv 3qcuZx8;w34KX >OgoNN>Cut;r_GLRHe/J3heu"vAShXk-n.'j?N*S\l_x7h{ R%'*f.]b,Vj?~^E._C 0 6^NOdpzqTS8- "R$J^LZ$):ibt)W@9<&NK]7)b_WO ggI5Z$|B 8. 9^LI\"%%"Yr:KL-u(?-~j&L7h2]`&h0gp+de޹yiW/c?X~?!υ)u:CXN4ֹGP`zS' @ղU?7oő'N>:W~ŰYا:~(}jC0\P2ÄY=5aRo?} <× g˙_w.)U}_q g1)\A2oOË9ZϬp#(8-h,v-V8[Gʢt%^!]TF+j inM e Du:Wת4PYP Kةcĥ v[IRKYUI$%xZVkU|be?lkhgrvnSMCz0Kfe^<+J1>ۍ;G'hO  va\Iٕ<hVwRė6Hv0LIoue;>e)Fy."偖:_&k ,2UW>u@7gj>3w^ӻ{e}L2էt],Ӿ8[pqu~puSVo؍|%0JGn]l~؜ӮV "D7-F'%ԃTEioVpq,cq4cpʮmgSlZK/K͢(v%ϤO%RHXV2' &he%wZUz. hZ \h(8Kc(q\F"(^g, bǡlZiڢ蚉'Ԧ/%8quy3G<⥧^- zZn:MfzeyBX"+.Oj|JYǓU9cOug^uzmXV&o#Z6L T8[)EmACKlZ7[!i{ cŠڂ&2DļXi UepsAh\ɠJIT ;?6tմICp{8*Rsp̈́t<\祉%%KAZ_CkHmHñ[0TldN+m /އҞ}\乽Mp۾Iaj%U8Aw6[mʖhm#p;sFf#L"cI@2%hsk"-C8WN(?u ULZ|]Oٽ7<蚵yCuݳ*xwt6ka#ib?QY-wRl$6woZu_sS{f~u9sl8:Wߧyy뷯^~ uv'go^Xqp֑ IapkN~}jśNMajM|zwW7k>jiK0u3[2O/nRNr G:hkM@dp%q~65?p-Js*I-K_END1#,]̖)i/[ /6ekN l8V[BBydsab𒊔)@ WcER Hl䞏pxpn{0fXKӁ  Hᐃ"Rh尀/۠ot9)x2|-ڿw34' hg/YrSl99@B6x¬jv(lG)FQa? X?B %r .zaD%o~8㜓Dνy}57w7X\js}SUE{ϟW ~6p5BY<VtئbTKf w( $R,Fu(3T'xaR[EB,ӢtE&xƨNAA4~>Lg5"X@iˆ(h q*t#Qp;xDR0?&H)@9ά X}Q[MsF|Q!ڦ֞J4#ؾAfŞf B0@R i+,ui| @7fJ6z6EԋE#BXYL^jZTK` Xy$RD>j{#k\> >\RyV~h{UqˇanκU+\L՘2Y'^`VxN3齔$("ƖE@u*'<[1ƌAc,6ʌ ;^T`0HSELӑMWY_q}vW[tSa՘Wi@Ҳnqp䁗FeCݟsG>5nfXP£=VIA).((1eϘ"T*$=#ICyTJ_@o)vsGݪtf=|-9hQ+^CKw j1!2l!ImSqVW_V2Puٙ{$S@xN liik++&%IE\ᣇVip_tF%{",`ʕNVk}7zeYZsGP6k.:|z3C"5:L )BhUxuKnV|rnR.Uc|f[=vO+PFxeZӲ\oS4Yg!ux}_F}eFae W,YwToFTON]ʪߨ&2G&~ Sԯ;.`8L+moo-W)';|!gP:mpH%="2`_uW47G5A=럫I`b4߰0T{&y(awab=I`c O"g@۟Y@{Viy`ߍy;&!]TD- Mas=ay;f] :фKRTYUfQ( l#4J QE%?M`-`[WFm51dHLfa)RlOr0~~MZLFaJFvmzaQ_ D4K ԙKJNe}mm| >HUg)\[šeoʋ[-lY:ܹ* suo((YuN}۟N0}Qӫٰ OF ;uVM,q,(ݘ]&yhpryſWUS4A#wP b!ԧ ,W`/-L `< X,cRB8|8W EX JhÚ`e1`QX!c@eqc*h%{gqoR0#4'(xAcð1rvS/ ~p|9bՅY\9pЧte 8FQouZlFdO^?XpX|3K`1p&b 6,kF8\kǐ Y~pVlOhiސy};0{B%9a""^Xr ē@41"A5dՇާ:ݹ[}ee MQKe:(eA[ S ``tTV ,ϣVhZeu&Xym}e6ʺ_5DqK]JAi \pL]꯫ԍxɄr'\9FE }euY)2eg.O 7t(ݐ#-G$aܛ-{C/١Xvim(}쵲2aa^!8k wr5UTsc%bD/YG}&JY-2 UJx)4m9HȜ6ɰ% DF鄋q"ʬ(M@B." ")`Us|)g2,xH*S#+J+L9Z+f';T٬X7WLK骖sWL/=Yɡ$5yhzu&z{J8o}6l8^V6GRw+=~,råU8t2MEˢ-[Erdy6H$%o\. 
$Cnhs ΁+l13q6#cw\3,lf슅c,=>{!Lϕ8G<غeV7;pg74a8^| #va°IfV5`PzGh Ib&eXcdf6'\[S9U{!KbJVT&`g[ضЛd]9ۺg3b(/ڝ͎]QvڶGMYf-l \B"0Nd~YL$MǢ ,d ! [Ie$$]2LYHlT cY :<#J>DLyV&tply6@uȁ"ƤY_ŲRq&!VK-iV17Tu+q6#b[i>Dɢ|!:;%]=.͆'#H/L(Fj[F] >!YdBW4B=.>. v6;nx &|Fn6\\p\N?*>[ s㫳o4EL Fj~?"t_٠ 4>8 Dr5:MVOAq]h{?sTQDQ2?Vْ`BC!DJ$JhB&wOl_Y#stHԖ6ZNUwkWƞ,B`维/IޮO݅lv+Ǟ;B%1aʣ)rSXTi$En"7oȍC<"b8?R •v(Ji.OfnP_b-=!ŀI}չP3\[3&gF7tXʲAv+wn-L~IG^ L0ah%nbh}0 x;R-eyOfVA` #Za/x2H~J ?_Fk&5^]^υBϵCƯ}L M6OK_~q@ GMG"`o&-aQWiTmcS$ȍ ĠdAdwI(m5N~2/smS$}O׵]$W?MJvqO]\o:tpDp+eZ \Ui;tR.m:pvJj?"?⺣+@:\U) pJS4pU=biUJzzp}vƷjR%_l>q(87٪I>4yGEC97H7ACZ,֨^A!]S}{u, {>l=p;gEj7Tfo ??7~ET}ϐ>w)|w͸<~"bwm@\PF_d\mO=c? xv}.;_uOYe'o[B}{}{W{bQ{Fr3ol͌Kz,4hh@꜔a!\MVԇ]+PCAz-YV3y'W)e*$u*U.HwF3 R*wJ(" f2O= d"&(N:zEIv9^ep>|gUOyf,+挐.PyM1`(Za) }vVs`%_;abbZD -|\pQ)MM-C^sv&Ζ>R^SGSx)Q4Fku(1 `,O %(-).ksLYdIIؠ)L6˗f(, tC]X.l$A[^,EɪjGj5G) '-!/)3`Cz;U;1AFB,2OZ,ס/芢`Ԛ %!dz;$T*ïZמn'r}2Ul:8zw5xc/+<Ӵ3{م'PE(bHBI"~tRX|jOAќ5Eۛf>m>D+b,]sxMץ^:~v>U4kU;5X@Ǫm/hq# 4Ld~!S[t0iUC?_GG/_^m{Vޖy+vbپ ;L)UM:rCn}ۆץŶgZĴGm럱EEbn0{>MT24=O4\ L HH`Ru}v(],ֳ"iCJ%Q2)4XI9"2j|VImQ+it&74RNJx3q6k86qE-y-9nyvzE7zd/ru7YQ)Hs2䕳B  )=Fk5W3핷坳$iZW~  ^d{Ǽy+7F0g 3iluM#S "w^y}oc藲jK[5)V~YX`-D^5љց:GA)N$7(ZzSDH;R1Z+aFW&FGCbH;|Lxf^N7W^3‘;פJ|BQ*%s:, XZ $T)* N$.eGR.|4NtׄQf-A2c %rJ*>5U؍6;_f74=_oz|9m^[٥k[ V8}X}sݶG79e1o*$]Q}zw3{}ܝɆFoYi^6w6|g37{|{-/ݹ~>\k[\]qyf2G74$M] !,ye@rtJ֡@t`M8엉^oˌߟ?4 kVBAʀ6JrE 'Xu>UF r Fvپko'iV%uҶY`<䝭 ("QjE#;Ȟ=UgH-IWm1B( dU^{!(9MzO!P2B%8}oRiLS^N Ni ȳ$5QP $lZ qq-̊Mkn]A4Ί5?{ƍt vUy*u6٭U4 ѦHl)m̐eɠD"QѠg}phVW_C:Їzv]\ "k~עx{8=ݐ(fEir6: _֭ z %]8qq|rMcsfgwYOQyUxq1=}qHn} Rwz<[nHg0t1%]=~|/Y%!n|Y3li3Rȥ,v 様G9Г6{銛ñfinu~Z]vju'Gr?$vaΰm/>ksޠ9 vǿ <~|'~GƐ:W`€[M͛6T޼inM[Vg|vm[^m1 N;{y5q秋/Co GYd^ -ף&FĽL2\Aϛ\ޤfǗVid PCaybr#A𒐄C}eB7u%6?odM 1! 3(6-JRUĝ429 {ayU=vq0p=pMǙHP{ƂB)yN@vxʚ@}ee1ЮYә+R37Ƴ l!J[^fey^MZզZDS)Uy!pmV.v;.@? \eU&E4 q-O <T;A?)0& HN3:n$)m~lם U)fVgjIktaآ&ۤ56'Zݨ/tјMkr#<3M|4(6Ԇ=OB7?Y&qi)V,23{s:e;Mdt8eOל[Ɖ{= !N;3Nj4KTvT8KKG]o eفP_1N67G9iur:ƣ$o 0 r ; v%qܛAx0lg7/rp:D-0¯'!tzx}otH",vUBHv͢*`H織UҎ$@5}1Fʰ>R[3%~g\zi3k`ϵBN.cb"@- W{gPqjّY?YQ33g&0}Is d-۔oŃl&f)wsh6kpiɚk9|oks+Ջ%y^}E9)R)&sv20W&:@tV#Ƒ[YӋ~dEQ \"y[S FjHE0) #(T J#c1s#c9R b,­'M_N/23^)/;Y~sB~o2}戝 , TC9̚(tѰLF2CYP+trÓǐ=pg6A hfA'툱 U)D6k&2g=bB]Ab㡨 nQe+5"y #/dkԀǸ QRC?&壌J)5JaJ;FR22C*0Q#\ُ_0&Q%6$*,p*l^IT;n)QxXsc;>ȖynRAJi#BN82Ƽ3HH "zE&d٩V@M^wӈQ+jUz% UDJr)+x*MKI|4l s.^{IfJkf} hUf7 LgĴ|| }/1TqXiaA+%D'-@dўUday:\DV߸sDAOl[wNYp y05JEr0.Ye#Z#%)%XY"ޥ]:J>+}sZLM-p :h=ӐbH0X0@1 b<*1ZŘTty,2ғVYT u5Ռ=gP7J`qK>Q*,Ͷ&޵Fr[ٿЧ vzD)` N17>=5jAEVw{4*=Kuc UsaYY`Yb m-V9׀υRGB'bDJ=bO[]J+"i{*ټc{2u=νa#26"wKדm{s*VHib;ES"m#oU(] lR&CLLNh-b4e[Rї)z$t~b.yM%0,ou׻ޘ54tּ8-cf+1t{opG}WϾhh~秿=*`Mf۾)i}\پo㢵Oܸk(b Bd*aK} p.ݛZW0PVk_+p0t,{׍G?],e#_6˫5{rQlXLxy_o/ ~#KpCQng >5NWJWTF:ñ➥!ϼrΊ;ew%elttFWm:oZgˠurq؜[ON|q`6gkFhoOb0W_ؔ'}]C\ a.a{YD\uS+>`p4)Q F`d i~@8e}\>_i~uΜɏymsn>BJs$RƷ*,*I$R4룴YFx廚6;goӒӄ0zMZ>aտ o_hU2 d%[r[;B476KFFaHKвU[\P#}%QBrM(c&BeEB?ZED1?;[EI#L]z嵝2)W6Z4H7Çc0\j͘\"R j"di(It0UXh!`L"KC.Bؔ LmC)JYgQ}h9(+--:huV_@2r˲8$44{ A4G#H+B6**RCWaF]n>劎ƨ2Kk޵Yܳ5Y^E#B`AC<\?eᝏYlwڲ`<%Ctȶ8pw AQ55mU9挑9QQJr-Y kAq3V7J?% MC`ke#8: Bяhk0)CZq,ei f`M$ZBځ_jdH:!`L*To$[ `14i]̔3y-^fV [AY13,HXF.hI[V JȑJ"JEu)I6Z*e»A덶A) V"@VXT`t[\ ԃs{m7p;Kuqxt&M& \_TlMy dnıD6VlNcb uR4b;kơ(MS3]]bn` XT *5+V#lR!* \EL0e+ JQ꫐Q6J` XPk8(&J eTC d$MhfP57._s?q* 0}R h5a( @$ 6HC6ݬ&+eB>7[SR :,,pN:L;‡ r `:`ܟ1A %3n.B% ` ~ʂAN[*&- p]BPu2͙`(q seԸ`j NOڲtmDj $bm DSUmӓIGH49DRY}&1a8I !LDMe;w 1¥lßk-٣lP˃4%MG2^-,4iҧ-KWrSɁhKhtU@sc $(v5kė4XH脼VCԒ"4E"eLԴR IZ0҅`CM}B$H.:4pnc2y[ʽQx3(nCe6 ⥛s2YЩ ֏D>^Հ\Ÿ3jH 5Yre5k$@dx*Fon+ٸ󨓹hOYs`I%>iBx2cv)]L߃$z1΁6D. 
-r!Zf2(BQ{CHU$\PD;QBtX:"t kTg]I`cN|0rFQ z,,5$t-i̓BwzH"}R<$}x6 3)H̔ȢO&Cez%Ѓ\@ CCX("m*p(yCPOJüO( AVE!ǡ . h#F BB 9ojWkuB;]u@B  T GR ,MG5 b+9Ɛkp9ec }@b> }@b> }@b> }@b> }@b> }@z> 59;>~zȋ>hǘz> '> }@b> }@b> }@b> }@b> }@b> }@z>a{|@V!\O;J}@rH( 'P}@b> }@b> }@b> }@b> }@b> }@b^h]˒ BQTfK)|pb> }@b> }@b> }@b> }@b> }@b> g %ӭ qq_jzX\_֯;ZΖgyqq|p 2J w߭&Fzd쏳>(W76ﳖȢW[3tr'yJτ90t h}WfG}YW1s)/3)1Q_C+6#n@evȭ;[prkhn',^[j[G,ggůC,ze?F"ϋ C-蛽 Zr:F?g~Z@Q!!91QX$!/=gg>WONת2yUWY;MGU:国>Bs419RUZIilcB:i mV՗֚/SUϢ{TU]/ ;W=;ppҳQ W/i;WX 3pUzWG(9\(m`zpEZ!vzvzN \h}p( W/ AU8+M۸+phW=ʭezApe?nW}:<><{_$ěC{}Ko?}(g[k{y?İ&w5m-e(BB6_oWqG9*1ϊ߇:C6πXVZErRty›E()hFWet^Ek6$}jԄ\1?}NW~~/g_ƳX)|:?fJE$C2?;<&/r?0a[<Һ%z}x7NZ-YY=Zz~rZq6?fYa٬I.bͼB511ҼoB9.z5noe؎_?k}Sk\Ə!o3@huo"}/}0.wh-vۿd,%e-vO ܪ_O狃}EPORi)nKNcElB[Ig &km8oAFE@l8@^/}Q̡V+zx8$jJ=+.3ꯪP.l2. %1+Odew spZ j+i>a55_L)f"pm9e ޶^|n8ĕyyaw|۔ϣ |^尛|4@>縆r4֞?_pdF"YJ6+Ny z}p!g~2TѽMCN9RF`z%,&'“B\"dA!.Fn!^sLy4.v0)|5' *sF:KٛZ}-}{Q=pkmm/8M#InYe>1<[G=QGQ]%1Ɣ]ox{n:_Ѱ:TGAV{e5eC@N~#U9nGp6()Q)a U[k4O__gMH_ȖӚBi3s#/+X9`-הDmvEׄ zInt~O͏~W\S:OoI/Ŗ-!e,Wkƥ+|+)`G*c+A=Pv&X7Jf΋dj9=L΋s A)6Q8e7f+V]cSb`eӕȹRӤ=$%z=6!N<=hPzѠ\Y$45A>ZBH\FscHQ4Dq߂dsl>2Ӊ6 &'Teh \Q UTN?\gZT(CKm.3 jg9.UъqC7~ܭMn5_!)܉Jnnw`nshdp;귩ZvgVwIc.gv-Q=g햸ڧJAQ;^*$)# ymz-#ZH0>Wƕ[Y_YQXmAXݚV) qơPօӅGՅ M22n90ZT?mPɠni AZwTKVJXDQegMkg@0@$#ԡRbE\weNnx8 s=,MtP"WFж#&&,R)[c0qǡZ[ںoD9ⲈhhZй:D5\N)%=-{$;IR#@ !&& 3A!pDGً$Q zZ>,Fn}cY1F,F4@`אϩ"G-8"G$W''A͢A7WL#ZCs9qFmRrg|PT7!%̈́MD5b1rvk+WQ/LNYKՋ^4^=4RgA(NT*&uv|d^(` M&I3I uOESч0}x ״jFm2} o .XQ~l\..5b\p\uQ%6Ap*lIT;)Qv8ؑ|#sC}9f[yupz{8 0=Wa͌I_a&=бˑWȑ`'WALW_ jAV%#Wn2?PIEIPIy9)k'ATYgL%*Fu(9#Oσ ,';ʣ"GAgD9NZ!HsD@ʹخl;!r4ᣱ&A omTDɨh:BNKG"gWstJ|%m+ʪa% [S;3L͢ _ ML ;S0,n5VءWBG>:N2ԕ*N*f #B'/wv9H$/ A"-46(}$ZWFy 7 9F,|ZKIvO9$I*IPDRBH1!15gZiS kSD0HH[3o'P`}ïɿ ڼvu\3q4.A kЌ,r $uZ)O|8iu)I4cR:_ߗk̈pz u%[I$SWݺ w{6Cc [7ϟ[Wۻ;:]}}6e-v>yxw糜{r;?G-n2X>>< 5uYqe#'#^y,Ys;ߵ0HXyIc|a"t{XnmJ9WT_8,hYM?;Lh ZQD7 ªH-cڱD_WkزS->Lk Y:PW%R@$mr1'lD RDCQӚ(4HhRHB ;\,y)ZK+FΞBuDhLI?TףqhCl~ײ֣Eg>YY7bS1n n^-U kƻp̫ ayogu8Sy+Op ќa.z?C #k8p\ )m !?wAz?8{hA ϧq5hd aPN-9ԊsÙk񃇳z ݛ4U)XOQ x9O;VDL,ȍ8oi)s '|Js"ĞoSлuiZ4/Vc/.f*2F(y>gaV8'&虔jx1rUN[7Q٘{k4kݛ6Ϯ5qQ}?msjv*0{J/}Ӡy5[ns G&M~/V㑄}~x?O2w g`!m#Ag CYjho>47b-+Գ g\[[}*>fEhcKn#@}w0_aĨ-Usz`ɽ8b~3-9ׯ{aECSq]PnbB 7 iKBے1}w&[K<>ZG_e#7pX HQk!Po=Ee"ʁERQsO p'MH/l_OpUJr3,$pN@;M&Pl,z0&v{:EtjݞJb#TW@j مV+ftr.Yhy+ _ʃT4B=$u(FC[hN^.wC~Q7i0DJxWBD*ιTwZ^.]DΧGqԽw7lV;\ؾ_=w?Bz-A\Òt=!A#>~?\AoFwx|_7vyɼ!B>q'тK۫%#ۖn\<|H{ן˶g{7-)&2 ׯ\{ۑsue]K>t+!-GCZk/|_|ĬI z2D*T޵qdٿB .6-֋@dfFB=%CC%QjfLj%էn:^Ǚ-$%LoшЉ.%~S(-UeuzT撋  ^HmJ&eҜכ*H\@D 1 5A)΄DP8* Y@;|mp^BNɧ+F[".)R#/Fؐ؂ף*|af.j #"x ׊poYN#$1Hp| oǮo=w޾4Z֔Kw}+VBk*{s7(-X^ F)e ?ş!QL m Kǖ$KJrgiϼPhFٝט-ˮ+?][ TK^*H`ZHN ReLu!WLe1ΊZ1xP{&S:< r#ˑ!H)'mQqmp;c/|YNJ_A tPm]Mux v{<_\N߽;Y,';2tLdd0Si@8Eogn<=is2lh<ʁ~~^`RVO؏?y346!A2^LQu$ BH×ǟfX]FGD?/:uAp{,NcOGϗUNA}m=ѫ +ER$@E?ab f˝}9[ u˥~}|? -Ojӛ ֗4凳w_x;鯶=/Q)'Ce䧺k'p+6wg_n7zSfc+L>T6.X_EW>AuL.w]([sWS vI#s1Hc7'ǫc$g(+͌ʺ Ky{ɶݕu:OߦԠ%Vx4G(nx-]"qϻ!Ff1`f(^ʅnʥ~v,6=P2/ҵL 0$ 7Cly@R&a1OYYEvчV7,v],dut+@O]Y9:ӽw3!Ȩ! 
HGuT1V:( $7VrjY5lv-@<ũ6)V)L8dDI4+rͩ-HiѨ\^syUՑ F]xjp4OG(׷ד{:q͸r;Wv C1N8+NBakͬbwats§a"snJ;I'IŵE\7$sp% Ѫ\3JNdd=<*Ü_|4YmOQzgRU]+ Z5Õ-Z3ZÛQJwU VWøzEʔRDU/aˤ̎ގ݄\8U Kcxk<"vixvaønG -@ Lq>íi}%SIjRN( ӛ]|,MjZZYYW:0fcnG #W]IP&s FƆT_6әhZsQlrFkV(SmW]OovlCbG~ZaCWl5m]vtԮGFkč&-trt(#n]eʴ$-t2t(::@4U,5tBfOvD +A$"BJ5+A k:]!Jhڡ^$Fѕ,#\g0tQ ҕ?nWeT5%g+M 6-#`mXkJHk62U7%׼+Czq ZQT)JDQͷޡ݌7N֭i ϢiÀ;AҴQNr_~.UBٝ?iMWP0m³ lSE0v2_g7;5K4<d>%UсrkZ пl/&!{e˂kC:)o~PǹtVvq]ybl}_oo2/a+_pXj]QmPSL>Α]oso}[ͤơ4㟖W[Mq~ oFX~_ȏw]ýO8[&oy;t[o?'=ިOEGRKz⛧S 0Vڛ75=!9F5VjbX4hQ@A981_ {R]";Ec?).;Y&d?Rg-ךR<!&ji.*`wItOZ#U!2讃vpr~Y>?#~d+&p6_?GS[}4[5NJήZ~u1wK3Yi_/y[wlSύIjӠk>CQ_`"淣ft6?V/翾̬^#;\1x=N^F nU__O,o~coQɠR(ity ϗ_Qq31b[n{3cp)XzcX1{)0Oѝxuy@Uj~yO+}5ZU{Z|2CLCF?+b-%\v\8JpYӯ~;CU◰e+Ix +eKG^Z ߱ r{WiTGQi{y8-VoX.=W4Re x)&A D\cD.wӱuue c5GݭjBT@Jv}]8?I%Pg0O_<.e ppD(qCRϓ(&sԓVC}$Fq:XhR68K=?\pn(&_VA:4qF2Ҁ\"ח0ޯ"O[ϗ6UXU~[䂳2:q"2 LM0AGW\D/Q3ID"JӌZ dB)jcAawxpV[8Q!hA!j+yxl HiH()%#q$!!GBuL(vR| E,}`ěW,?(M=Ţ>+΀T')U18ZO\ ޕ=m$oV!QJ6[!Q$)@aùkјIz,|',W%adQQFA * !8WxW``΁ ,'Z;&W܄j~MZeA%hM 5RZ]`3G}ПFTO {C-^Fh7NJN-ΠH$Xnt_ק_^>:ʐ{4g KihԹL{{ջ-̋|ji\o& jeYe;~ZU,v -R6ǍݪQUmAvzm5Ҵ`/ Kҙap4[Nˤ[TP8d -C*ؤ2F4,Vi>yMbGJb|HwD81AEk" EL<Q8-=CVj2(:o;cƌL"^ˈi5̜[!- kMgHJ6)?.dv2B~qW:܊ո֞vsidw}&pvzv u1}®nޓ:I.CVLRwzT.LtzZgк\jUwۙOvth]u,;fԺ۶^ƻ;šJ0.-͟nXy/_"2莎g`fw݇zKrʂ4˟77 Z`lƪ@k6 at{L:$+}rkm1+gcC` <|m0z RruQafXTqLp$gC#*M̬Hi NqoCH%J3Z?>8C^ED!7pIA5c8TZS*X T Qǖe0`ƃׁDdFPA!Rk9)S+6:'!^e-(aSkwDq៽aa}c,>59u%_L6RrIyӻNe8Ut]eA;z0J{uh|>3ǺX!2`)"^HA)k&pe)nllU  N[ 5iaBRӕP)?¾ Z&d#WP4H^J aa-x<H201xIE"E) 0yY= `MOycT c-!N&4H#C=Ca/۠otә[ }P?Cc4g@{{J=J;:\Ow]jv^&lbR`tkkW?9gm@Խc^b涘;1sw; l W[s*KTׄ fB4]M_ ^.̖~xiLx_ ^.7|_:?o~|x16Q;9U LtƐvrȂeb-~ԡyMnlGkE,:@I+pp ȥw~?C?O=ܛL3k3f#T >\Q V<)Z\gرC(O a t FfH1yZ/@7fJ6~F,&#B+`"RSFD$%b2+Cʘtgc3klFcR>O8OFl].6f)nڡu3s=WmV0~SB<_ъ= B ia&֐ƖEPe ϼrӘG"h 6ʌ ;6`#' $ jCanl:9x8A{źHųW_-6T6E3UoWXq˲W0ecwԿnHq? 
&3*(l[$JA^c֪e9,nv(5f%<:fg*)00#9JL|SJg#in>\Jo{Á^2o݂]JNM6^R)M34kL [DdHqH0"r$srYg|+"-ȸ `x =;JI@#İQy0ai9A~sJ`˩kڪTͭ,3'L@w0;ͽZP??yZc1%hʽ^pNEq8οazܦ/Wv9W_uVԥ2  <Ҡt0`|{=Nf*< jͣ~׷n"L2F`;`\ Hڜ*) "cȖgd솆*u:M0-cgl@M|rxƷ۩@- E$ֱUSԜJĮ./ 'ROLmP{`SW,{l5aakMbSlchsVud5V#{ljd/63kjBpdF(+{٩>dra S!dc!B<,Z~G=qjƣO*+2PS[ʪ-m}#v贙R#Ѳj׆@+]tڛ=i@o6Zq'&dr'bۓ)uN6Jv"MŰpqⓅ tKNiIm}eJe%U"2Yeh-/\<Sd#n<<žIױIAҽ#] b<璨]ŦApK'~뛺,OpS;V , ~{`bSXYmvwX"`aQ FjZ6 5YUYan+g]jjt2T:תmM\+0aS-*KGTli63NWrխkϗaO5|soTj0[|-S^a߈Y#٫A&W󼗞]"!Ňu]4!KW8[D`U)ʪjJWŐސ!;;=C IT&PMkTVACeKH D նr0!ԲֲNN=ظr_; n|zC1([XcZPSUf6eo}RW-TfdI8U9յ)QlW0gyҩ;kպ>,|W"ԡ2M틿)ˋO)&ВiP@{j eMFQXS=3#ޛJ{Na$m/}I'&&K}'vjb/vqP*d7Q6Ι l^.`ȖWhʶES ad>Aw85~pSDž1mF=Y&EKnO0ۓU% s{":UFbNW"}.A+V ,!,RP&#\urE^Ʀ+Qi킫Yc]N vf+k!\ڐ|Vjk3 6d+u6kW6׮ h q2 .+KXW ygPT>G\vV6{ۙGvr_|;nNh inp5l^_ݷ˛W~9 Wo h6xϿ޽RSxsv5Tu}./׻jNjf[{;E|usV\ߜ՟=csx׆1oȬrFinջn}E-|Z|vu3_ۍlxSfϾT^Y?./d<."wIw*o[)`ݜ_Of.1js޽B^Ϳiwoɟ>3ηrAɌ;Z ~l (k* т2އ- #r򿭴[szAGo%puS7۾lTSm;v;< s z{[0^\b@r% "^ ]zyO/LH]K)qUXtbͪAh U ۂTA7vzþ>뎝h #֗ M1aQPl-VTT"M,T5Bc(ӽlhxza'mz;uh/~ev59?u>WwWW`3)VehU p?-y!wz$() =E-ߞ-1eWg ,嗟N`4<74*7}O{{)LGO7Nn\V'Y-W'E岙2I^CH2y-t)ˠҒܽ#0*C6wKII]da=) .ȰÌ˶<ȨܧwNg~4붓tCZ:yNڻt^ww (̋&m{e%_.8Ǯf7NJF*x8ee۵$;lp%rMW+j -q%~`ܠs%:] \WF;g|Fb>#J: D-q*^p5G\!(UF>+|.bIǕej`\{J+V۷[ŕejr@!pł }6+gR;"J|ATΠ j qS  W"f ZOJT[p5C\K4Q>5vbdgeFv9Dz`P!L\\0-j15;Q̂9b!Ѿ &]cLA^N8їio ʛ~gajOU:J 'u\JjBg `.\\HW2,kWs[/{t_\*\\ԹJԺAQIf q<J=f\JbΠ\p5\y/U>2•&\\JԦv*Mj3\Bcrd)\\\p%j}kW94TBhj܉G;t2$zZ\6%f4̪x;0־kvOè4K,+dqtrFEFqŬ|6YbMx韕f^v;Q!wQ}VfcS뎴">NeHlE܍[pkk/W,߭JzWtzq)%Ag+P•=t~q%*SKrEpeAWF]6>+3NM޺-!R(#\YvlAQ\aql5d+IqE.d vY4T~q+9 `Jlp%j)yU fYp D&AԦ3*i%H9]u%rriTJT,z䒫'Y+ZujVyѾvH=2-ni(h!u8~ָQ#[#[u$gJXo?š5kkPMFb/3DSBbVт `I.G %krjL޺bp qe Ha D.f+QlU,4RCzzK/S5, DT8QVuxqdpQjI)sJHh_ỄwٵE:#\`lpr޵q$B-`d  llx4%/߷zO/I#izTWTW}U]]|.*s+衫̥4Gu+&$̰F]!=t2V騮E]q|Z<#u k|Ad\W(r*אꨮBu%ßhU]!?vj\rk8;tu%l݌gf'u~¨W N@y\$yF5ND=qahD'"PEٜ99Ow3FkpUPD°'/ouT@sJ>K,h4,G6*b"AdTS!~>1;Ͽ_%揎ft6T5b溾Cf-jjk,M-.Lz9"?/9^|W7>ӹfZ٠ذ"+c{ٿ10U mv([υuq ;Mb.rzf)I~u;ҫYuG\7vڊ-b?v&~y-rSoGSxtuyA#\sGX+Cp~Lx {:E'fEZ %%BPDkB!To:.UnC¯wmi9e}_TGv-q$MKpxٌ[zgn2& fE;R`R񪤜RjE ћhE꣔.*#. ȅƉwϹIZMB-Mђ(ZvحjuSrOX͜B&g !lzE썶é]d;|tFjײBMIݶX\YpEUԞq \i:x[Ŝ#19fv94"'BkV|f"oW1`< 53T82X)ͼÅ˼Pl <nkB%4ǜ= b]Y>pdbAD@0\ _)J!(O"PQ4l"\;AG@EjLH(劸 xJRB}5l:.vRat+Lw+CBBDl^ROɅѠj)E@X`v \pņbꕽlG.~ܼqx>fo>j5tz;o8K\?GN߽SUwc(!"':8wiϯ;g&?BgɭEry" Y=G;~i"u:P᠇ #|(N"drNVSN\U_TcT'зQ@Jz+w@Zm`=j3g7^i} (M9Jջ7 F[#Iثv9chHɭf_fQ묌iں4ϡ5Qr2>^x n2 8w2{Κv1KN9(d%^~x7>/!zZz{r}O盺aBnfYX^㢍Pj3g1[AٿMtwol>2hmlu6MnwV9 5;(>? FIAdѥ'3h"o$ڤm P&N P͏_~=e{/g` &47 ݏ{ntWߵmuͺFѵemz:k~o.hkg&^@2|YzUWa;yea5#eu;q|^'LI\p+-X%s=/)R 96/[wrٿ[H>c)j-$cBgzb8G7}6T^Q/pdg_e'T ]Njy[Fm_x=gR B) #xOZYBW6X={:T0 )u:m5X3 l)Wax+fӬ!⠥ inP7F'e y<xcΞSdXPldv~._2#|_U⁲ QbJzKytjYSᒤe22C;lSU&e|Rmțm&%+4ʐHO5ǔEÅ'21]5_Pyi˵)I"NOF:죶nu puYM+aS5!L0(~Y6h@ɵ@?iAŀ-LD+ i/G&9ʈ h@ L(.6I%x4L84J+DG uD*qR&CR !ÌN% "T;oe aPzk5hki>tܽx !Ves}0..emDWBD ι/@; =/b`Bc9cOusSu>gIY[8v~_+Hmޯѝ˖'/ng*m{dHPH_3X00c=t e-B:}Y}'ւuӿ7rܐycKqbV.[nJ&gMc{7f-7nV|kێ/KvMc{DZO5CZ+/jq1ɷ IQo(S=s E X`XikicaTi2isH3v dSwTUQhH$Ҙa6j͙Brc.7J1p._Zcgw uhpXQ|XF8BUg1EY#rnӿKU#ZЬ0"cP`фǢ͎ cUZ׮>.vIUNGSz~ɎHQ$&&CA4}?1Af:I#cT  F!BLG;lё+n~2< 0A0-dЋ\Q[^EfV1'8ÁHT ƹDl5vLyhpyuHS|G)7]f̬\ 75m^~A=wQF5{%:F,B'q&(zȷ,koLxgXJ:28l@}2!Npd2(pQ@޵q$eOwcع`'ķPS$ARvV %J5Hyl@uUw=,]YYO@`jKzvջ垶ަ޲|R3Eeegԫ_H?$KFG(U PaK%ގ 1!sR~s[׿ :1؃,L& B:E3Rq\,TbAø6D$u {yvs(=|NuBuw[w"Ǖx6Uk^/k/= Ypp1 WI͂٘Ys`@CTx!Dȼܰq =0<0Nv,?!L`^)BDLkQ czXxH<-'gqVa^9mۼ$OoN>e^F/PvuʳKBGϱyoW(N//xpJd0#.e4|H›b8.g`ZrrrZZ@@mizW<ǫ.ODX)5(jl I<8uty4_i:hܞ~a[#ּGv: 7b{[̍֔bFͼM~⸈ƲYJ'ʓpm:=ׄrq2'Ewճ%]+ȧO*ٙ3^w$yѼ)#ҕuFʗs6[}f0?a2~mlZ'Dr<{Ic®ny{S<﹧K3<)\g$-U-R_= oļ8k6i|g}L|Ľy횕 LD;$Ю;@E-hw/<ۄpERF{f!=i ʑjF3ځ#rEm;-%D;9lH?=ϣ:p! 
g1prגGqHF)v) (V+*i )hI@z2tVr@k5eo2'[n e,bZ%~ 8C?O#)ZH '4q_uRH'DB,Sgb@HE*'t21ɜhPQ1vH]:*"ҚVrqɝ "DPK%TIn<r' ؾ&}]?O` ~(uLa[a ߫z)-svj괤fy޿TD"heR%8J9MńE}Yv$9,;N֝hC2j]P$U TP,'ˈDjN\BSN_ν踧('7ZG_!24xq,H D98 6HXjS} a*GJ@>d8~4 1G_( f\rcUN51cA팜͂rrjieӏr ʁĮJƋǪ z(i\Mߟ0j9/XQn\\T)‘EX]Y _g߳A_ bB="$b: m.x~m0c?Ao!v+U <^W믪_ԉ+]`9Q֢agq9k /|hJV$*ZȍlqyV4ߞ}} u}+#_x(4 lgM1Re%(M I%={O+={'Zwf1Jy) L)MD=nN7 ЂV)MNnOlf5s@hUqв Dk&AԵ9ܩp Y\L&JnڲwG̅ߕKIt8C6M<\UbzqkC.뫷yJO[NG06hܻ;Pdю%P\ ւP{l4gxpN#8Cs 4&(a6T*$‚dTKk ,,I)źUdZM-C *4'_雋Qםfjgn!T'0* ;)ڜ[G%Zh*"/EQFOU9Z몋uV+Jܩ~&T;;nmG]){l_qs\ċɸ:\݋`I#]|6\{j#4J}Λ3kco(hc~k[JA*~j%rR4Jv rqRbz}/&y8.B#ܳQA!cx}ta ~f,7y*b̭`趝JTolw"y' V+.q9OXpʔ4FDaʻڽðHa2n\s}u_C4 ى#SU'x߻: Ι4)RexIsT9+'.RJ&e*O l2Jq ~\4$1lC6we,s_Oͯ\'j2+X+@biRhHYmx$"2"hHVke9$INSB spEd2βF T$M8wԴ 6c'%% JiDUuAn-$N?C3a쨘:?%0:Z'8TR 3G|FȨ nPfɦQ{X +T4gqJAQ"Ł.0̑vEUP4iNprQTuAT"z_<48 YvX_ U>X͛֝!pPBY3% Y,kOM2+V*/[)W>0<BwvӊqJH 'Q_r{0gKÙ.K}4-|OG3|CKuk+7<o< cx>¤JrC#QhzBb9¶l^ A; In7$>wyv]8 yZOV,T4Hc9-C]KrAy1i"x9Z& +Gtephy 7PAa4Kq[S-Ah[Ry !zbA jhjwXbT3ceAXȡ Ѡ;#J&l#ϱ۰,ALJ3-VUDi5eHTyPZG(o<*"4p}XT&N*b,zTY|,5⬐RU9QSTZ(c.~M{^<ԡ9;kFFP[ ZQ;~T*"]C&-w;$aS6i` nQ}A f՜%C8˾Oyg ˳v Ǵ˳Ep$;^X, B4gllfѳq5eowO(vV8Y:u4k昴kFq5R֘4ZefcMQ?mAnYo5sPqlzۊ\sA-i.!@v⣫nI֔70hlCiJ3 _2~.VO 4%鰀U2'Ik) c1[.ՂHҘ"b9R Y 5LBER!6.Xv꤀k5c4&TsE}#W3 ՠLYˠwKqbqEZ[!k@3kԲt,~>ar u G3AhWTnKX+m{ 5b&{x*BĠg=||@l|@!6ʭ%t<> i$> H|@$> H|@$> H|@$> H|@$> H|@$> H|@G %|@ZEi6> -z.> %.(}@&$> H|@$> H|@$> H|@$> H|@$> H|@$> H|@{YdѺv>> gT H|@$> H|@$> H|@$> H|@$> H|@$> H|@б> %5癁K:"P ڑ$> H|@$> H|@$> H|@$> H|@$> H|@$> H|@pa=WOԔzo/YN{[Wݟ, Is-pE<;O-5o[JJb[:{]oE9#`ol:;b:]1$#]Y*DWp%7bA:]1$c+J)|-!+森n ]mQZ:BW+ ASdlf_ܪB ]1ܯDW֩C+Fe*ݜ64`糡!5i.th9tbBWGIWI[T 0hl20ZsW2c+clT[k~1qX>Nj!oBy`(bֲՒ-7 (w쩲'ZӏϿ9\\?К蜛Aw N=[߷'TF*Oofk/0^#j2g+_˳Ze_ r~? V͖/* >׳7o𯛅}Autf O۷OFr6RC]d<Ī㠜GbޙH_8 oo\G4ܾ4;evSsɩk)n )8j&x/:٨O(*՗V#O1 GDg(lyQ^?+5t)R]p+kgCWC+Fh݌ Qφs+FH3{BWHWxU1j<=>WfOOW2#+2)=#ZmlfCW \,oyt(lס+Ǻψ85b΅%ӡ Qi0ܤ\Zst(=BWCW&hFtS 2\Os+F~ (IT1U$c]+\ц/eb*c+Q 2PN~HQ)UCPޅPW}Ê9_/䮯Ynۮd\,]qYⲫCC"ܫģ[3LwlxNoyd#2_ή^뵳&[]1\Gs+FӡJxs+6!͆Z7ZRQ%tutey9 W7FuQQWHW7s+CW 7ŹІ/ +FI$tutQRW5 LEWnxꊏ/2~t7Zψت+\?d ]!]|T;]1\7+F͡L2wuẗ ]1\ͺ+FLd*6@&Ny7J9l}Ss9dJ[ '=;싟aN޿|٫WK']uzR"9-kźbp49n|Yy|a-ߵLWx/^@^en濟^>k)딊) q(-F%߾?WF_Ջ,\{7o7@?^^{\8l~V]r} \9_oS#S?Ѫ]w}T3y1ͨ[w !e\^ g()5kt1l te(t,gA4u+;D}hE}3jy*O/պh\:QR@Ñ-1U*9Xuݷn\ BcKѻ`pPpk[.nxyxWk.A]n3׻]Xw/|_QZ֘.ί_:_FrV$Z.7wNKܾ y|5 qy~kso.~1)*5PLsŌ*>(sRA:JsDbkVJƐRN6*}#@ZckX] O<@&YjUA u#P1*]sF$b5b <\Amm.c7>`f\x/ޢ@W4kyjgt2OOjS}\ *g?l:ީ:w#GG_ ,{M6@ vvn2Xx`US/<-˭I]$X-1Jce'&emm_wӖ(x[4jL,,tWQX?-\8N+v3ZgC_mm̍~1l;Y0t0=YEU})gY;4|:BqaOg*yJ6_OlRN4NVF3 ߮fΚϚM =YƎDW6RJGӦQnJLgc'N{Nb بSY_5klec1k/[>zq˜V*@yvZaKZ?H߮.n3<9E$-Ǜ'FidwF{zdnJwtyЬѹ:5׫Xq >7 zKlҩǓ 5`@% JQF0@R,)ϸ%3>JnjH vmY~?ןq%=C&p\@+478jt\7;Fs4ߦ>}gfCnࣽQ/xܞ9껬7cqcFyo\a]ur 0&O`n5kXg?O ,o?KE.UzTޛ;. d5G`CyW`-°% ;WI?}r]f$zZlm\&ImL&FîjbH"~(51;ʲ&|(٨`YK L)e/L["Va -b@4: \t#BT\FH!FϕB.26s< ^Gc2&Ζ)] 熽N'p:9]{ $aHiq=)\Z:~;°CM!IYLPuQ!+P2ESysV5TT̒[&rC}BϚOA$PE^)nh-sVqC%b^,ū6c1';f WQI;'WA+)gI']8{Ll' | |+?^Z)'bD;XfclxkHg4f#+odL.9m!{2̀rV"nd\Iy3*|a5ʾP}^}Rrm낌O74م/dNt80o73rK^kyέs\J%CeHHF`y \9B uIt2+ L((nSFl&nǜByT䄙%*{j St^c$\ָ dHhc@^Eì $2!CE'| KFpp$EB$T ?&#TjXxjqG:"L9dU@z (T,"A 17O};Θ1jgc$YD.~&BLNAíD1i$lR)pŋH:_s:Xg5-/~/> F( 9̵ږ.ى2(2Q&DpYVGx(v?= 5L#4TC("-ՏU?U?*ι^M%4]|Lv&ÿQjUC!*I\|wS=zGeq;zǝd_(BuZ;TI& 1ڨ` rUFs4n qd@P TP+s&` \@q=TԸkLrzql+׳Ww? G$3Ú~m)feG [jw/^qdm` = 48-kÒ !{[=(@,̌ dQB[V0mĐ%X/ )A N"wP/] Q}e [M-_ܹ!WϷ۽)ߧ/mk}גf^:Gzt,GӔ0Dr2ĉs&鉏jbIM"ٓ)8*tх6h%sڷ,S eF%*3' J.G摅r=@BR);^Gn kǫ%jSX鮿&Wt"M/̼ E\EFv@_*deB +zJ@cv2ZC[. i#dNlrR+o}jāIcUS >3N䝀N:Z[l!'ѭmI[5sgPeJ9( :pF:dkVY#uݛ0: ?Ԇ=e&-$^ws1J ($Gb,KDFZcu>Ȭe, 9i] {ݽ& $Z椬uJ',E6 dh.`yJ {=v Ub`761z`LHVfg H.i*I ! 
z k/Q:jך1$dh+ XKB#g9iQYk#dTאvi*ȕF(fL ,c6_turYg6-,eaԝ)Otio(b!N0gO}ۜГR'(璉Kv)L'a28=L^з}0 *'9Y",BԊYŵܺpKmσhZܼ[ 4M:eHwDؾY<;{~4)KQ4_\'ZKoҮ_4OnW`T6fG7KhBT?|zz>f})JީE<JwoW#hkEn,B(,.k!&` 0z?TEh$dA[!Td0$J? 8 ܆u8הrg:2l.,"((2bFvi22MZ) M2G;$Y X +UoZF7 */[#Ńtbr&M3xQ#(QgJ)cY˴Ds[ I(e ߀頋΍i]+4Hw]1,<~z-ȥݔt\m?ܟαkC||ĦT,@9)W|IӬnrG?{6?>7g=lVuՓz_UuZn_+m陰2ѮMr="BZrƭgʚ8_Ai7v+@_FnLxc}8uR(C:L$A dP̬<W@Dn,Ӷ;&H͙0]vakH&udRZq%Qe#2X%WE@9ssJm2˂x>W'+wX359m^}Zw*F^0HQg'a2zl[d~Ag3|/QgSxہV xnQ&/x谠ܠm}g;|b[}3=5.NTl`ujp:G:nHVIxm.;Ė l֫unz_|=o|x=fwል1Q-F^}%dͽΨN^Wÿjk̖[u6OBzJ8^GɝK&ojn)0D8@<:@+8uQz%<Р/PgqTà9wR@!swBgEl9J@"Vuݑ(BфP3j9=;_tՐJRήQ,Jj2RW=/QC#l%Rp`)%@Q eւsˣe 9E/׉2tsttTdzh`;#>_]G2^ _ y SY*ԭGoY8܄@^S;@)7NШPDcKNeZO=x፽{!~$ g\+bV1t1Ec*!$gʻg1[fSS(dF%rA-6Le^dAǕmb:2}ac38;zܜ> tOWɇ5/lv ݾ|oWm6n)~_{v HۜM:̦DLJƈlQr<(9K_!`N*=eZ{ǡqgޕy(6 ~aub9GoC\t%ekotNQٺƣhQauP z^c>j-rnr[^ެJԨ&͑,hGr1\eER i,3I ` 炇Ao~ yy(}!+WlJpa zn$>uڪ{{_6%1 gsQ٠7DL*`pC);IO9 ) p?g^m)@I>!2pvJے;Q'f8%Y8 F)'rFE# ϩhrTgtCq?^bM䳴~>L淀rv"9'/}\`k:FovyWru%c2N'gWyG2.Fo./'i߭}42 C:M1\׉~^Lƣ.ω`׳䗳_P<駿|7^w?CM'fN(*$u r^񂜝JKf?ǣa>^=DיOo42ZVko&Tlaqw7jSzq\Z\_6QܸzKK:-NjrgX[R<=zni Eҝg;{5RF-FDrzuF%/oyW.W/u6PJL,;勼%ׇ2r:l{L=(zVa7]}I"yu9S_xW $nY]RnyA;~!ug&y=X-u\R^uztk0.,$͢hW< pWTDJ> ) eJ|F >0$QڈAKS%gG<# L'ZC{c:Rk08O| ʔdxdIz6^'G+0f/.d{[ygkɜh2E2gDV.=WU}GQ/IC"ZWT;VO1Oy-< 0}/I(ya1OŘ"RT^TCW׀:B\Ut eWU ]!\f4 W& n++m5thAfP VCEt++m-tEh;]J:Bܠ6X]!`Z jNWہ`Z;V ]!\S w" :JRX+6/6Kпn;rtG77NN׎BAF?O*om?  g#NciOY@UOVBlˋrlG ?Ma1').i:Bqb>fE{*A];8ŗ)9[7Aow0㨖Azi$6Zu_p!\M +t-tEh5;]J3(HW0"B/~y!]\Y6]!]vHWV4 !9TDWXs3Hp-BWVۮ@WCWg^d""BJfPvutדr#.ƊXcAXpۃN'͢=n\}|w YlSkj y,oҊ4j9D@K)v JΦ[lzٞ:\Ah_+JjV ]Z+NW񁮎2<\beڡ[ŃBW+^]!`"VBW ʾ tBUtMtE\T-tEh]!]Ԏ?;+"% c+];eB5զtE( tuteI"ej+Bk] Q:6 #]9aE{Ϲq9Vl,-.Se`;p>w`k.M#`P\mkiBtiQj>F 6nXewm(>n@qûE]]o9v+"#/yI^ X Ŏ'_ϪgVZLO9oY5jV}S`w[^&Lw?,NWe0JW/l! fDKjAҕ J]n3t5:h;t(+]@Hӆ ӳ/M[DEҕȖ ɅՄ˛,<4KՓUBm&u5J ]5OW%i1j7!Qf-p)jLZ Da-+NpC ]M xEU!xG֝?!*XEF>nw.t{6.IߵlnKi{ s/Os;;UEAd3btMnG% ߎWf ~+5D+E|L^kM8$wLW&{7 -=צ[|.{Е(]=vfF6DW0fj}x OW=5NIʦ`?}x{t5Q&tʥd㖊Avl&\u5zstC^ ]ycXV { ]M:]MzEU0Ol=Vv].u+t5Z:t(m;'hb-+gp~pf]MPzJȒl [ε Vj36hQzJ- ;pi3j僿38Qt&2kcЬ `}{ޠEh@5UV6hI ʐjb?鹟ɻ\6y~h3ݸ;'&=YoZfj}CD +k9- Xd3t5gVj|t5QFIW)H-U Or ]m+t?zGV^|;tst}wG9=;-]_M]}~7A1;wߟ7?^iA;jF5}"Ot f}z7䳳r9Q߽@c_vwѶ|v|yu~X|\p)w{]nL,&ȿ^۝*O.88ͷ~__M_z{_O}f|G1O.oi5G8>=o?~C_ȩݾ<>5 g_Gt,<<.jG͗登o3O3>bNߠw9-}?om]Aǭ;1`u>ԳeRsĵB$ΔN Boyk| $!>MEKWWo7? s#S5&-qQ"ps4%elPLC@1R|yB*ڜbrڹD{Ͻ#B)BQrU F"Nh)rQ%'/!ѢkezRy@o޼ EKcx_[. ɻ%{;qƌ0'Z[@,ɌaB4Cb_CB߸R@A-VKlBl1׾=MfNl(-4ZD@T1TE=6.!lCt-20T`&!R;ʻƐ8j1s hs`>fxJur u$~$iWr?o(ޡ T,JAJTCw9X9Bz8o.B9*E`[=LG*F Č@r#Q(nD u}o)ɚ|X)Dɔ,=%c##[$3e Ȩ&/M,t= քRjH) A4!r`)Z>EВiUbu:abCfM: 4p:k(9;0f%D -5.U{ b =w$:K+43Y#eP4BZ%V<`5r,-x`fRm>1h-Spam\_V-'a P>fWΛu šˋ:±e60.Zb"$l01gGiulGCVpTFU|"X#Һ1 [Tg̡m+"J@n7((α7e@ƛoރ >wqUY{Tڠ@2XAӪ5-k(-|i䋑Q+iq#0-8V=-z5NQ!,2:a$قn-u;Ea2E|X5fs˥:pU\*iBC,K-ٱuPs?NJPpԅ˭qf@BĊrYgnyw!BPq% ƸL0H5ozex;?/-WKwkҮ*|9 u|1f@-yGo;G=bx|h=cѯ2>нYmCU{w E\Ƿg>}}~os@zZ͡Es;k;| 8SzO?[SDQ^+uG;9Y _\6^^:r^}z|uW_y5OqrWWW.Ol|eNjwÌWOr~ݯ/Nǻ99ћtr b!#ч|v6hTK+Mg@]?4ȱ K'Lx ҋې ݎ}eDۭ&CےLVOcrl"JVjRڭnv+[JVjRڭnv+[JVjRڭnv+[JVjRڭnv+[JVjRڭnv+[JVjRڭnv+[v+JlnȴzƃxEڭPݿڭnv+[JVjRڭnv+[JVjRڭnv+[JVjRڭnv+[JVjRڭnv+[JVjRڭnv+[JVjR7XWOݭvV~^ތ h (XnVa^Tڭnv+[JVjRڭnv+[JVjRڭnv+[JVjRڭnv+[JVjRڭnv+[JVjRڭnv+[JVj VIw_f_ٻ6nlWb;nI6h@{6FHnE=Ic[eK5W σHC!V=Vs[nh t+@IUnuV Bt.ݪKҭt.ݪKҭt.ݪKҭt.ݪKҭt.ݪKҭt.ݪKҭt.ݪKҭt.ݪKҭt.ݪKҭt.ݪKҭt.ݪKҭt.ݪKҭvnu+̥$9>Ž273ꬅ7{WT EZ6eIޞ0%iMvJwavcFHX.`i ]ZE@@銃PPӰ=! 
XT|gpWzg~RHN2[.K:(}0dU-만Pl1#*FS(hԎ3?gEZVUw*BGGKb6QUr6ݏtq>W <^t @c(O4Ր7jewVMjA#9Hn!N%Pmy~?+9*eG,oWK~+vzѿҫ<'4}aÇ+{.{@0p`!}x]FxG_x^⎆mumlҵZԴmgPЛE&=eKd[OX (4ք{S[DWX~ݮJM %2ѕBXWc}+@Iqg]"]i0oS[CW.kM>{( )ޓ:/ rq]բ9AtضRnYZ&aN.F|Z=708;<0*d^Dۏo+vϦٰl(*=*%RBҚ(8?|8-Cw9/cϽ e1To(+KeV |xfmn8kb$SI(rV)b^q_ˋ|=22WC+0Z}gIw'q[/O= =fi,q 63Hn2E/E=6=1?@>@yBLD)^lmUv>ccn]g1=.m}R_j]`LYk*} tPJ GX\BW NW] ]1J4ku+ puk+F)z꽽U@!\R" %i ]JB{ (Ig]"] -+[ p h:i{ (eGWHWȌ`ZCWBW-VNW%VS)vK pj ]}w4%̵g28&P>KaVL'5yA/@fy,s7U˒C2w hQܕ,>r1|Ek/Af׻u; 43T'lPNć> o]!5ܘ'_g/{6~2{]ᣏzv,ֳEODdh6*͇ZodQJeH5Jiuܔj*u[Ru]YojTmdq>eeUd Ǧ܎KC1X.{i80QX{iI9-SLS Ԁ'B&D\h3 ƥ?+<6$)@JVT}y %(7.UNs_N4]Y./Ηǩ B~o"F`\Df›)0X0,r yQ!b/&v>7drÌ?ϋ)ʹtQlb(Zjc}̼qRmBqj v&~~$/ITwKh)b d(70[3'WnZUB9oxK`|i:5jw `M09?Q_ƶ\ rvElz_FƮzY tznfFUHuRkyk/….9YTUı1(ŜEWz.auyU1AMcM6(s.zХ]ZЊ4ac3H/:I2cSV0t OOw9*)tʰJݛed OƵn^]{^G6(0;ʛ]_\'?'o 5 *xͨy4)8:U;٥y;*+L2 EswV ן[zUOiO.3덵sohYlJ!V ڟI D0^TU=՜}e9rf̝2C ފˇZo9nǣEb[q"ą"(l0GfXYQn' 懫[&fтUᙇ[=߅fXhf0,< KHA,B$E,_dB(YR(L uДD b r!T 7d.v8+1$Ĭ"Ӱ{"%eRɬ̰TiƭVP/( j^{>[ny*pNSkʺ$q^;(3<%ViVqCg3RbA!&!09?>tMIJ#EJgPᒂiƸ`Hǵ˕O0J-g3)%ץ:icƽӞD`d`d }J8eJ:Ԙ oDBk7@UbY-o~}~-ܚac߿[7B ^y%jEf`$?; Pc* ~(eCW1 j?#WW!D] a@#ƨw&ڞ)  z:Uy@R |b<" IWÕ `%\wnAhZm'17λA?U46%ObS]^BAz17/&e?=SP[Mc`]iP6m~~ӗߝbNy|0|=0&:o -ܪ6Z]U ͫmP&luԫw5VP:g1 DIˑ{6Wxڰ.5#΂z"7`_,>3j"JTD%a]ENDAS~PoowG׭HI}$FGrR2N':!45N(z=a.LꝤ:PKUI\&G9ťw_`8n1f$ !V{&K#,4B+ȾVx\5Og-wgi;kqq4n,Uց{L7m {_F+̪n27vmgfkh#P3XXjc46)1&YpP3mGm yҍ&"M)yS[N%H-)%sv im109x)y ANN`%XH 3 ދT%BJXg+,ԛ*&p 3 V!&PU5S0"JƤx$ %Q>ZuF^$% V00QhSs J2>Ap ƈg^X>WK`gеN*(ZDWp31xbNu$pcͺunÿ>ow}ωI_C~YEx݃סa1I3vY ISH0#h3 u[/!۹t׵o2 C9#S@=3T=PbT1;^DnnJ:ޭ>|?-V[j4(C4n ʔyC36y2nrG|6?/ǓqGȾh=O>=4;{:p;Eغ4hV3ӯ{m:ֆZݓmi)cC9is=wAnjkJΧ)Bc8%[__!~_Y}6`\lxTXvH*,jR瘢NJQ5­DDeSfȱ& =Z+JKat!37JAEK5qv/aOMmƼuSqFyq,nuG߿6-\\k'VVMY&c@ӴcaIO~M9&u^U$۵1zlwOWW.nVCv$ΠR 3YCk|7oe[|s;NE{M}IFBTVisQA&Q͗D5Q~FYƥ%rI`3)>Z΀k4@J]6UFRk.!J5P,}VdR: 9pYXWx̵Q5qv^ ›9i d[(t/̪^^N-< 獠5R#b.{{4$bG9H+spREncc j~VEzZEʧj(W<o<4Y$(ɰV&:'N^(y@r>./ |v +zoFGV1EέB**lptQ9]ɎRJQ,œJ/v﫛7yQ BU<@\.(7XYJ bw:h);20}JY<|C*OAchhdѸav,8L??́Oyp4?9 d|%+`@F>DhxBPG٪S( VR(0,zr/LL!32g5q1l!OrI3t6^ ۋmh6ݡ؄r̽Dyg^>y(p0e9reQ49(R^t\4p'3<oxxHR!q. BG#U,f%n ވ;M&Erwhێ#h#-qEYQdկܟ\{-Vgt^y,9BcbM J-xG~i uϮY-yj탿{7R 48k890)9[.(lrBZ,* {7W>FCؾwtivzz9l#rg8}c1w&dŬ@Si͋R{e U+:\Kn!* DU{EWj;1cHЂ(̟11eaGq ZSE  ZB1l :Z}sBbnitgk}~FDtYzNqI^j$T0N10@"$DE*-Yx8lqt=ޝ^x.d:0k:|ۧCOp}<]ﯟ>nOtįz &Ӷz-Lއ/- r" g|N|8=ƟM p?;Ӄq윦dtɸ5r8mOM8_GZSxNHyF8;LǮ5A9;ܞ/4cOwr3у4:njWIiYT?q<ݾOHtMy!n𧓑'am#QܑԺ6Lzrb8:jVL/}?y_r:@?_ڿ/S~'^+[ Ý,JBRD9KžRil˟?NVeO?R0 x0 eK,_;ڣýr]WrS4{rnsiWHp([L~G=9&_)Wrr]:QJ=;8M >uCt''3<l>XR(swnk)WLwUWS !Vj?L&ӿ?|@K,;>r qfVcN}Xu/"*7zixcw u{Rh5p"p|ۜFV; '14]L󥚸y{-?t$֝5 5VHoMr}yʶ> wvzq6 $F,k9 0 `B)sB1կTYX***|g{&̐dȖ'C0`r0.2k%3yA+.- e`ӫ _g%jdΞd>Ӗe (/ϨVƅ龜x@l~~׎qa ,KFUeiL'#߲00~\[0LV~A@mYhaΐcO(ӻYV`4NR9RX߼K:I9xʍ77YhQ %V"D﫮३NJlžܠcHd%48f`' U 7VYSqbXgTv[K}\Hӻ^`4)n.xqe9S NoMn}}xr3EOYĪʰ x 8Fi@4hJt:PqQz,""x<9I1Kn, tvk^a*1)Aj4ʆZM݆ޕ0Qf,CVӓC/O/fa+Yq|iI'mpq)wgji.淪AZlwW=ic.)' 0YzE l5cAm5*%AoEUh͈q\O.:m1{2̐嬂MhsB&'U3VggUjR_+B½. 
2"Zl q1w&Fp=v&BnGdjsR ''!-,n)̊nC  &mJcQ`ZfY(79̢u=v5qv{l^y(^vk^^G[R&GːwSp*bN1@0kcI!WEG,L,#( HAu!T۠ 8CG<L<wmH_am60efwع63X`gb,y$9/+%YZt$jYW~*<同c@'*}^yK#7{YΘ6lcT$pA@.LH!Z57mdI4%1q8QJ@hV|͒]Ehu]7x5$wLMJ2 SقwAjL&bmVj].z8` {rѤSG> |aLĔE\LΒqxz&.c*=de1JFW"i+|;wwzbwIaiJX,JmK8 r$dFsЪYW\ⶄ j*-:yA$T_^z0LZtSlAs:Ycf[]p|y4qroJf6=^N)W0 ūσWr^Sx^e{T}th-eG1oTHZUaPrދʇ:IZl dPžʞ{@xKH8 $gФpY>3Hgrc!c9gY { ,Nu'픎ݝ}2kk۬V~祄s@҇sC˸]ғa3@!DN6gQE*٤:ʫH 5a ;G]Q93Eu_2C5UoDUzүv}&!b-B x03}5/75?:Kd@& !x#$0Kd{.** roh<=vdIۃ9V_ 0+)4,ӶEJs~BRjZ2znMƣDcԒ!Zl/<Q$kDVh,iM@j08BE4s}sm@J!B<8WkBE7>$]*o3;u}%hj܂&,}wP |ۺ5Uq1_LE".6EZ^ѦHV*ܰuoeUG8+뫫0(/X+*Y4;2MZ'>Nu,':F # i1z#Yu)$f8t;orЁ,ht:Hc+pBڢ*Łn\ xL,+JR˒,RE`1Cdy܎ A'w3i̛L:F^qa3[KmOԞ}@:\4DI2c% UihmվCبguv5TXr y+tFG02g(3/29I2gEζu K.+rԸrɷdܬ+?3vX d}ys[L+aMv|4]oWs.oo }KòsA#b%1*SX^VX~"hZs/)Vclߝ!veYLGX&S!T%gV烮Ĺ[y6i_וOi ':KYn'-8ZOLLZ>bLaqٽSgwFp5ns>qKv|.<`m u2ߓxZV?mܯk2`[7GzrhBX6.U7^o]wOŪ\_ZW֛oO?z6n4RˊZ΋Ə|K-7χ`~cfoUYzNoȍq79?vpvp1 dl)g ¿qoL.I(_` dJJ&E=eN?hN'7xH$_KTڼf6xwru!|L>U\pέFuKSfFJR>ب$TG3"Xe B3ZK<'R5]gKW|5=`.&"΋$XіBO~ru h]~b-;_ZMg~mRdBsLQȲ@dBu΋D(t12m\rYPw^;1J(S3EX.I5Г"h񄊓XOB}͸zQm !K UhDfN90ZJ^FX+e)q%%Y9_cmM(Lotd" (#I#!sB3YJr1e[EjVv'&Rar^%fL ,BParvԇ΍EھobQ>fQiSJ Yܪu9I~pnzhCR.}xQel Ν1M[w&͋k[9S}je`PXLkϽɳnl#mY^?iI=]vZdyCJ?ק}ك-^wG{^mu6uw'潎g:R:V4}:40# r,w=fniN:Y?܉ݛ+?cͫ._撃|߯/v?cMl-?!Y-v䨧|~]C^CKJ[8rkmO(CϟgsG ,vI(,!ړ;izN@} msu oKb4_G:RsY5XjپhEO z}6R2F*.>iH2#WgLFr6Ixe3R`V2xk#0[Z{d-OxKC o"Mw"lvA hmjc:tF8s6؜g`[{`dǃCt96}";'9QޠK XզdiD3>e钲!X 5f)nutlX MmͽA;܁>d44 AK2銌\{S)MK)I<'S>HoC>TV͏V']wYtm_&3S8z)Qke~!a}>%z}vaTî[Qu]ڳ*;Y=;&ا/U:DTE4!4)hhǃT(CFV!بL̞dyqܣޢS[r C/<'AE-<)U7є8kJq;k/VZML{m z,˲~y[TP|K%ퟧą#|XAi <&ӪOmgz8_t} IvȞl]`PI1M l'!)KHRSL53U_U]zVO~W%쵉CelNC1^Pa=`8[| VDN`;+=Jv^*B3/`>&a y~"Seٿ^=]0H$p*k'MW3OQf9"` R4"^JG`%6wQ{DW`d'U~&OVHGF:t#AFQJ6hS>Wq2C 2$Ѱqje *;bVAbl=q8aJFyyP}oڮZ|A\%sV?妦o7mW  *iTqٌy*]8zz+}O3#[L6j6I@5Ho|)E{!;HF0Z}3d`7֛bޗ  HQkv |! ^ xuҧ_8crVgA$(D4( 3-Qj2h[T]B9-aaA-a[l m~;}1|u>M?-Un=|_o)( ;Z.?>O8~iW2ʣ3<_OOO/ /axaJM'l^̦o`:|sMh,|v9I^*I9ϓ:ޞ_TJEFqۇߎkng}Wiq9d6:N'<uKysO};X1]q( uNL/'G߿WK^׼ G`@p_ԗ[3|dG5yc?T}8?8^=K]՜ 'X*էԍ?ٶ'ƫãrVr7{ronW}9sR.fro `<5q|oud˫G^nVʑufJъ(BfLlʮ}!1Wjf?2 /ZKAggCE EZӽߚ ^z[̂ٞEXS}Hri|t7ߝ`k5ehuEաچ:kz_g+aT/Q] CK| \ T!ZDHC#@'^9ܾܧ y܆;~.!aVҤTc{DBN "I@&l#A c$I@~1#e5~kBϰiID#RĂG,U44 5&Wu:^XNAMQkv5xD?膼pXu-#n9JSħA35^Jx&ϫX_LJJl^D]QYuE3E,J|k6xnp#cG籣Ύλ-*X KHR&MBR'LŨ:xJ9DuTn2{Zzȅ0}k7 Աѱ9ʔ<I`S&`!C9GuJJ/FksNH$1  /Q&*%ཏE9O9.0dn L1Fd=,7clN~[j$b>Ŵ`jN1o&bw/o}IB 6&NCy Hk33ytnJ&uZSݓ&UuH۔n99eͻnnICK>y47^_sbk^l =Nio,ՉYR& 9 d@G(Dߍ$נ;Sqj܎=Qp]qasSf x׊C^u~SAS)J |˪20@G C@ʥ@ZӖ}%cX`v( YwSbrO AP &dcB1޳K.I) :;3D0":VA KQssJM>D IkmǼ 4l&=Wh:m@VCז*yxqaj8'ջf f)xul ("XHuQ*"퇍z'b H@ y/cp>hIGBTE Z}J>yMt 8k kXd, Je-7(AHir$VCv0l o5R*'Ʋc?6s<n4ҧ!#StBέ(-)e㢤,Hп[y:3 A)i0Ȭ)$K|i!i;]>?b ygȧ\yMzRMXmnJc$r~zD _%I?Zk޾7TDFBz!I٨eq #:Z1w92m9m4PT1. } )c,A #qZPB0^з1gk ; 9')E$  KUDJDSb! 
!H71=bOI\'ػ2EHJ"dH'Bڋ6jH C)]SC B$-[@ 156~JU1ɺ%9PPiMeLKY~PBd(ӫUwz9O`ذ{m:y'ht%wOZU{C)*N#0*RփWHB1jWneTMzWR*DFhzrm ]mXKt9Yi5Ji Tkdl&ndlUaa3x(XhF,+^ITMe,aݙ!x9Yl~:Lf&O>0IaX ƈZy/|e*31 DX)Ub6'<u:1TgHiUaS[-THo'|( B`KED[Sh%nĦ4]s1ʹ㡨Q{d4>#`cH3/K֤O!Tcv ҋl֐l+됥1Cff(訳cIYD2qd/Tg{~P~8"a0l 9Dh빴PreMER(bH zv;tmMR}lXPeQ %V:}I+p Ěz B+qi|U[אbʹ䡸qqŭ]e*AX,i2M%L ͪdB_Lqq8⺃*mC+RYW Wu:t2;]ծU-`/75}2`_Vyrӌē|[?{m$ٜRهE ~Ū&צ#ܚ >TE40]?z`Fv7 "Y]^jx|D Ykޕ/A!sxz5>-F%)__ IIQPmÇ8Ɪ8gIF BedJ4zǝ5_׫P 4W _t_'Y^a@Rk۝Ml P&@S,r $‘.wN[&x{szGݧ۳xn3Y,M@/pnYs2Z01ZL,DIieb#dE)RB ZHA )h!-YDB²WX ^a+,{ep ²W²WX ^a+,{e81rPX ^a+,{e#dgt' dq5}.g}FY,eWxG )/%RB ZHA )h!-RR(RB ZHA )h!-URB ZHAˊXHA )h!-e],RB ZHA )IV ֳ1bnn1|GoMD%e2C%͑4IK%@:|ǝF@;~#sTҫȴҀPQ{8c2BBjH@iJj1ɡxg<Z 6PV%ZNH('t[lQYuGj /o>13}uX3cFIu|>nj^f ` '«!Zǫk(VTqQ+%D.RF{!\:+}֩dZaYi9 F@%̂qM"ADX.Yeji-"fIJ(.=ē1uq.L Ys 05/-Bu`= Ӑb0X0@ ܢ=*ZETTy,2“Vj8>? DVf)͙DE(c#jO8uĭsQWzLQ@v,`[ϼ|=j?5~֪Uufi*neilWM`5񓑛 GTxt6TRAȵ DBs՘J"+U^ ))+>Jd^F=;@Yʣ"2ψrVs)C,&Ot" sH;Ǎ4ᣱ&A ڨ<QJT >ݖ8۳c76ꮯTxcK? VC|2n83'# [>p>Yn=: 1w a.@ (PS&z&3d~.oreIŭpET]btg{1HHI:ϸkLgX.def\SB:4̻J&\ zR㺖xlYsۜ;pIV[VP "q<7҆ڧ`A/KX#wD~8w4ŝ)vڄsj5gݟݺ-`wscz'&B>qWTO \fuFWbbJLm[EkB%D`VE7xH;R;W2^;cͩ5^\]2H.V⧒rJQ 6y ћhA%QJh(.\qȵ725e7糘&ܫUԞq <8ҙgupdbAD@0Tm _)J!L'σPfcmM(MkU <Hej D*:Jk".H ߘl*Ԯ"O@h9&2l^ROɅSZx"΄K^m(f׌w"[y\?vίͯ%3(T 8;ָ֕z1^O5p[i휻A7y'8A)'B 8A^?6NĻ&V q38 ! ;"'pc' .'з_8*)j" SiEj\ǭΦgy`ŧT?.B8w{;W^D;rbjMGM[#̇NVwa隠'Rrټk#\xtvrߘ:K}inϮ5vQ\yS_xu=9~zFYoť~y3ތs3,ׯqu--- uͰH!68YޢF )Gy66{Ɇ^UF6:VVy3 K>B~:Fΰ:݇* ?N]t.Gg߾ͷ﷯o^}2s޾y;p&2:4[!6Y܈-^O|vm]^]1 J[0rk]O8䓐gu/GM!ܓ;qa);aZݖJ&;*\z 1]@z|P^nb [*h-Hx6RZHƄ$[xrQz`hT.A\jI9=1@ΆْWGN P*`A(%v $tVp+,%JLg c)ڜر6f|w]#;GD[ ;+`UIyA)'=M<˃ydd`xy(7S]^|d[^?yoE?.{ l9? 1V#*O\q|e\'S^Ԓ)dJCUֳz\g<MЋ?OSb~7WxmNo]NYomy>,|o r׆iyN;?4~Yoݝ+?ug ^j)ߙ\ى:Muns0̨|LԫC/Q_^wWo92 Yu|R!"訶ZV"T$iEE@ErTs92Uwr~b`BRp;xf\nDkuzާ^:t1˕ݸ^#1~?70z,swuImz׽sp1~ȲwCa02c HsM^ٝ;却[&Z<Zo{FTf[!- ^JwՇї|e]"،j8ߪ-#.7f_cDƠr E%n;*(YWk\keF]]坢 {ʹ7w -EU/,%f-7*izW<{ƕnDW&AUI !#zj1!15eOmĹ^ !}_ۥ83(1—䟈o&XA΍PODfHm"j2Av*1`$VVKQc`pGυ~W6*'+ɠi`+Aϕ/40PlGI%68x2FdԪ^i*B%n4K'9(54"yR bp̗8@+q\{y~ *oqOmS|8.(YĊ[.38IS͎Z&FetW'Vd3??ՑO~tSTG?idLx@Y*E%u!&Bp6HCF ?`GB$ #ģ2` j"Q-ٲ>ȻP]>OܹWއw]wٻ6$WT]Y"2Dx6vUYE}RxDj= Htg~_Vvf7ݡ+{Ëś9ٷ_ӻۇ2s EV]@gW49= )F,=[ % %fm ='~JPA٨Jغ_+qn`٢lkry׷nSr1Knx#f9{?6be+3zҼI%el&LCvA:.G~[\;mm?g7+>{l JL@J0V\TL3^i#@8T!D77 (X$]m͠NzF+$/KcoryR1}混_ƈ1&[p@:I4jV$B2LDM i@ B0#6wA. 
lBIxRVHAZi "FPJEZ`4i1o53}?gO>TpGW뱃}U'-n_dz(d4dި$ LY,#BYg$hc; oh3 KC,2bq*`WW2#DRI'H[0 O:Ӫ'j)8^=1r<{( %Rl4:K)kolj7's0FUbh8Tg6fvx|bīN|ۼx0ڈY똅L=V@I!RY8jah-GƷuYŋ-)1F Q'^f%J]+ǘ+.Kt^Ŝ@9ujf,ӣɫ)T[??>w'p2/?/}/t|?;ffg}f|ٴc'gl,m8R pB60ɛ8;?el6%ٴCӢ7䬿,l1KʋϋtoYRL)/+Jq; /}٣u/q_~\:է !tNNׯ.?)[Ĭǻ+ehm, ?OₐZ?,L-\Wi*opjqƈ3/Ӟ88x>%VwRyY4o4)4um8azJ~ec~JU_j8e?թgۊjUYi UG>?#ӋPDW: HX()c#:f '^W/Z>y߿= &Ed9@:CulҲ s!`mqh*@ɲyۊm4DQQԓ42R))i UvL^B軜1g Eyᣗh0 RA\~EzG8w 2U.ܯFvQDE`ddSG*s>, Ou#AR0Lp FiJHd}E60LDY4h(:'|jlĹPIed>ӡl7ӓC;L.?&۠d`:HL^֖ ZT~y hʷ'% j"W1 gSDxYVz◪)2Z Ѣ#[hzrɃ01/Šd(%, R8#c;_6ӌb!4Bpq.,.gpuy\=tiVSo:<=}8/ ;) K[RĦtK >e*' DXY2 -^÷=$%tJ ںdM s;CQ&dlI|cn&gSڠʹ㡨P{`wɤS@Q_FIO!74!/,MuҀ4̐3Yt1Pɪ|82FH$&!6f܎=_ͩx(L?Dyx^sH 빴DrFZQEEvyh gɶUK!3̍3ԝ[E3b&l٧LZ717Ffܡnȸ8_w=Xg3-y(.Ƹ\pq\ICBZedKqEUji+ ج S& } d}a38< =@-m5hmi}i') <ڔ$N~Ec &\LMURgR;i ː︧|GwZ؞$K|GQ{om>16Z)JPaP09b4%KYPPnţ23kB>[dT]52hW-dui@.C{\~>n]Iutk0;u|-{׫̲@vӉ'W`]Ts&֘ԽkNEE؇5Du1Rh.bG# wΑ@; Yt~2RNb`CB:+RYQJVudQQ#(g\t6f,\FUhTFT/=#L;GLCyvWOҼ _Wl(;_q|^HWtQdb 1IIl9MlP2'&.nսc65o}ݼe-#wGR't?yMיt?i|MR 8Mп b_qKɚ2XM_ߟK8&#d {Owυ9&!Ww T5~Lit-&=tM lэXZ7tf.s?yc[ =PҕY(CAY$D^Xb&0:Y!z|3p .1w$]qqp~7:$W\Hɢ!SwbEkxNK50 E٘ fz6YJfH^+I֋ @FD09סHv2"jL(ůR )TQ:ʊ)SD='}쀯ؤ_{D_ӷ?ڍuȥm:F[;zH &LGǭu);W[ZPuo0uvW.y ޵q#-~ l@ p Qz5LlA{^i^[n!⯊*aUC:?ف}]TFxjLAzqv֥ҳ[I9(Iq†M4(K4t%[)aMS_|+Rώ7n:7JKd D+S@$@Gt.\gOw$&íEݭm{ޖԸG)mn`‰dMi  (.hvG)q+ C[C01< 늰6…HWdt"pP 3|$F*skkGl"\;AG@Ej,@+4)IA BkdS'H } EBBh#LXH{I=e@$FGѧaIO+&vvx.5y+~Iϒ)ǸSFWEƟ00ϯs7&/qr {\ )m0-UՏS'⤘֥AlS)V4k]Lgظ~>yjr>˛.1 dK~uv>][nk{9R WWL%bq0NioHBr0l0r[;MfyBa0l`^żB1jerl'iMs5b:;a.#y`)yUC1LEVéw;UdK yj0}\jtTG?_7?)e<} q+0+o"A/&/#~w6q܈=Y|qmS0]171frxu ~_!6lNc)j-$cBg*ys(-y6!DK!{MJ%4ʐHz|S evA_ _Py,]N[HIHp9B0smFuzghد o)w ܈,k._KF |&D&QH݅ PAZhTJ#kpɄLd:IdrzUhR NjI.dg(:Í!ÌN~hy+[=&C w#0' J D>!#i|4!DS噔(`T'c$ccuO kt^ɽk Թ9#rʝ p6=FN;I1Zֆ'{%M\Kh|2v)&|I~PikOFr%27i,|nʑ#zݡo{9b4 aPL]2?9<*O"7Z?{{b00pisKlyiן*[Ӻt ߦmG΄Jڄmi e|0R|?olA. Qg>̕;2v`K&ɤ1jzBBv}4\qȱ$HI;R)@PɠYei. ;\Ś)b ,ڲ˲c瀪'xEPP1"cP`фǢ,r#AUZeWwێWn6ջ&W@V­f 'od>DHd'r\ej'T*'QEܻMfWjƥy NK/ GLK0000Bʹi#DS(N'=ͩ}'"jt6vN5IjJJQy<7҆T}_X? +!Nٲʜ.fE9^v4Gzh^v|'CLgp&SO䐬"}UtEjNEC̖#BY() pTGlu^^B22NC(w5Yv2 ݛf[YCI{OMR /Ul.[Usm.f}u=ˬWv_CȒj?-T֞]K:{7[I(%rYAqT/lm6uq9Nmvhl:Ė%,gܺۖNƻ;a<notyT]ZGwtt6΅sbf'~?;~৉z]i&A,41!?U?+PS1eV#u!&Bp6HCF ?`GD 2 Hh.-o3 @M$*\"e9{fb{x?ĝx4ψ;f%6 z X͍D?}8SlEE ^EA@Qk!K ~q *3gT&o ,ZGp Xq6y>'82w(T)v͌-kqW 9&mJxl嫋ަl+:,Kn8[1̋l2;:n% b,[:lIӍ#c q>;Zd.?L0E~ nTq&SPhF!ъ3Rqܒ,6qmI;a77W}`.o670o݂]TE; " ާڦ%%G)w9HzTD()x h:E&*xj!4DEN#H:a8,rX ;ӊp݅~[:tGq?xRo|bPq2*&ɭ nSiRJp#@+xRo:oMp}#b? "@Qۖ]w8O?-6xY]Y\swB bsd-|1Dm ڏ3|5~.jŁ"y4:6KZL9 iÜr$AiJE7 g ^OY8r훷oପ'O,|;:co^4i35?~rޥkj}LoiZzT#&d,Ppq I#E}QN"p2 ~5Lz&aB&4UЏ1>{_VQ}$,IBHϓ[/6:h}u5qYoNmvE?_a w"4]WWU,stGJd0Y e# :RMI_0egT}{/ϓ&͐'6!O[Lr $X^iUiW㟣,MCCOkmH_evH~0 \bxHl+|9$M%*dNWߘ9`XՒ7>kW*yee/37++76\" | o:;'=mP  #d AN5ԛ2_w()9%NX-EX"&4S:K@6t6| yx'T5<2IbfvUwmު$xG%Th5;bNbi2wOD4}h+zW˿75.6+]̵!.8}(tDM {\)ĄE k")pGYA5F^FݣB[ 矁9}N3~-5Q1RZFN{g!)GD go^b<5zā.^0(jQ@OSF;!G2“UO~A)]k^f~Q%"?mxѣCA0rzׇQq?Jj1싂2`f,]Be/3?ֳuW $Cm)h)$!<[C7{ۖ\|= ³:(B6LBy]f?~QƢ/'?v;5y ݰ7TJW?|*WDʒpb=pcYmB`ry8G?~TP$W@0&ɈD.OE\%j-\WJZq KOɺ OF\%rٓJTJފg(Z?%-yY+V[y.Hy+*_N ʕ p%ؼ`٠GuʫUG. 
[binary data: gzip-compressed archive content, not representable or recoverable as text]
Ո$R|"l71cKzD+ c|M::T YP3n uz+ۈqH\UױX\nGP?M>9Zu#82Hrwe?<tbeCq3n /@,@1h 1Qz^mp9`g=^V/>LJR=/3&@-GԻ" TC.yd#%>.%{6n@iZ.\qД7*Zg'ܙٌ zD5!uKyH|}LB6n'uTjէ-j/ۢ}jCg/8탱2.Ʉ&"s(5lc#B#[?rm(!D1.|Il x@у]6rDMtV2]pU#ϳdT̍nCdSxNps.ࣦbrz*wȤ\e_b6㷙$ d:76ۘz[ ]L"+K"WXe+E \q乣LLS8wbyEjY1;U&Mz5yW&NyL h~P9y* ৯j {rV4Ph $+J'=hm~ -եߋ_t>oD`TL"HETaMiKڲ'v~ x/iD4(">M`՝%}#W;1C&BA2mcPm;evsc{ Iv= aÇt!h@e gwEQ\/u,J\+eGFBG!SI͜J@3g9nJpJ#x9eʔ+*t&<h,QA*2Զ =4G=4V7 >mc,hDdS>ٔ@}^ FHJIZ@HC{cͥsU:Hh8Ldtuo'%E8D/?}hnsi066Bm ?ryHSx.,"&Gbe7ӔF)%V,*0lpY@ 2Wx`Z=^ 1Y+egnC5Sp֑q HGz决pÚ#Hl_N 4Oeffb{Z[-i N,DsY WnsqǤM5YneCl8 8 Pޓ5|eAE0̜ٕ+J7ɋ]Z&%sM$T +JY(΀1}L1eL$ /NBdz&:W:P#s|;Ƿmq:<rNkGrg z/xQb aXHjS8ItF~֊p?Ƌ@NM!u2v jpX N0 2=MVObeõ؄M+7MΥJᜓqCL $U<'˂Z(3qw.sB P!CaFD\8ͨb\S8Ul*i 6_-G!nKsmL}x75@fueey酜%O5YI1Z߰o׿<(_&#=>C˪MHk򏿿-ixXϳn4+]ۨ 6F(1.XbC+pk;L̹Lm4'ZHM %XHJ̦XRrKLIܫeIs/HAx6$)Йxdܡ n4R1f0yP#8=R( 4B+_3kMDn1Q42(<h$Bi¾ =W7w&;9ͳi,Lc͠qKsNHC  ߗ5osDnB-N6zBɬ_%Ig/C*RJ(>8囑I1xۗ J-PS@Ia\a<ƫ!)EpNf#Iљs$UJ;851(Qf-nk#D#L*I'$0e'2l_# jGh&AXTDj̈́D,S8ǜZ!g)LUSOCpw ]R1GD3mͷ>m+M*yI B jT2ϫ'7A[/1Sgpݪٗiq F\ ov~[l:@yr.G3P@x ?CM,>t6 c|,93x;TrճMTgqA_VgXJ  %թһuhvW~[v?8la5n$DALD'8MtSؚxnZ&׳2utV잺𭜟6` kdhO2LUƌwԤ BC5esrf29Æ7ZѪј>d4vO B;Kprإ@n,$1"ە{y8!uփm!: Zd'gy+zP l'c!4M3hgk'S I>m?O?Ƨ\IaeZ_3ơ <d]9X@CF3Ⱥcd Dڰl ֳMQӋܾ@hwjWk7YY*6 V;Gr-uƳ?9(dY^SceH2xvM&=ӌG,;-xƝS(9VmX4exvM&;LF-Ϻ6zg5 oUϮߥNJsW z6m ҇ p vkgYO,jɷFdڑƜi09$);{q/a G5vB6̚ =0̋o}G[`c^瑜ǐ2†gU@YΣ eAhxǀ!E=T, DZ'B"hcH:+i$y"N : NK/URHMH(rV͕ƒ6`ekNem#Gg͵j=֪r`X/M>JPp&}^YGnLN`Zn ^ͯ&j+|t:vyI s@eS%VcYbC0RL9+K gcCaZ'(g #aieZP*@DDM=ϓa YT77_ !}K!LssbR㌘$˰7/ O2eN8d{@xv'BQM8v^hg&ݩ*[i*D̓b~ݭ- 1w r;*]Kb]!c-ltܤΰD$urbZi__ :LبmSۄM6a#dphxjMFTJoRdfYYDc\cvC;a ]b WS5/rǍJ>61)'. jtӬ_ XGwΆl!X~[,;߅>l0!&E(),xz=D X 9asaʤ[#V0@̤83(KWhjd8n5iŸSr5&]f3zdNB0n w~1_ׯ_/}}ƺܾ@d3I//FxXnn2O?}.M0ݼT\r)(`TD-9yyb|-:|#~h=Oyqn8yR`tr1MC>s,USLlhϯ^u1ōZ|Nܟ 'F 9Zܻ?)|}Dۻ7j>@mw8 K}ѯ/.n|4\Sެ {דּ݋Lr}"+y%U ^3FqސQ=MEq5*,`^Cοl;ݮǕ?f)Fδ\ؽozPrIV؁iwcTOS)`QYT-g=mT3PҔ3Π!dH%ݙ8:v݅Mi0S9Mb6KLZM~!D f17x D>x7@#*T>^_ k ,8:`}r⑨c}¹ â"}DFb\vtHha^v>ξܵ|y>O{¬dŘ )i1N nBDVa,-E>0>|v\f[Vm]Ȏg ݱӾב{Y3#O'Bk xy3whjYY=o6Z2ms-Z]u;Cem48 K?cZB@- Ɍ(+S$_ Iy iFu.ϫ KA6BAf@v4ѫNHAE!jD5n`rHDI[NE+ʈNK15FRjTZԏ9i|?z>w)~8Z|޺]|2}oyJMW?/O9u}/G/jqt&V,F,=YY͋v8~J_2YP>ߝlT8qP&E j 9ß?##f*vQw}h4vH>H̟c"d"эh B4 `47tT2/w'wHm5qɓoD#vuH{m)!De XJ"R(mT,-I>@+wDh2㘁PmE4j )H qc܎! %B.J3Pr-gNYWB^Pȕ.bf$(B_|[x3T Y#쳳f>;ȣh啯:SyaF:dfSVyvu+[{rB.WKʳ Ǥ?W( w~s Nn"n!4&.Oȝӡ5ry -Q=^#sڞvACPݳY P8>C; _{q糷`@Aύ>X0n._H'q4x|.P?Gz `>[0T et))29|n%őxj;-*o/ \PHs}? 
i2gCg!(BE T9* mRamL;]?|B!UuSŔ"_㊩6@^z6M&ag:E#0mʝ">½4`krelU ޖ8vN-S| oHlDEҒKɧ 8M|] d<޺J*CpR˜HK$f2\r߹>s{qܨhIȃ $qk6&i{s*Ù*sCiV7 }d-'4|C {i7hۥNnܨ2cSƆbܪu>KKxPpB Ggg X̖S xǹAp>7VtYhy($\S8UscvߕO =)zKՒha?}`yOLχC~lni]!pr\޵6cbeR PNm^LageERW'e;KPwu]Il;ṟ.ƅ`E|IcAS )m[`bPU]ík-B1i( P iJr u2a --33#pgxhf$t/_ t:| Gɍ3SmBrFJW1D`) E2Sӯh3'a1㎢X;/r±aWmL"DG1c}N~=*;NW0kf2yb^zuq7|t?w5J7ey:8L͵*@2!T TczT*KbBՁYM@RTtJ qۉ.AuZ]W[@0_5RVh c+-"IXƅ>M5^ToF5ʳVըrڳb`mXL$clkio+%uA+b:,xavNErjgøU^1a QXHk#č Fxeo(s)f88Rem$&Ȓ %ynIQ׸Zh*2r1@I֚btt=+h}'6Ũ.V_2q8!6WԝY+Ԛd#~ʛ,LL,e^D+z7چTVG&LN5sxғsnp"xצ޶-7JIDiRK%F1F*3#++eۂg᧵1!`,OyX¬v8O"WJesmӘWerKUXs*jF~6:B5,2I$g%Jk~<bUsL2ۻZW8ЩYXv9Vʼn<oR䁤5g^#xT-AuMbHpbqHr "KBnH6`FfyVFt{V\c%7~NǴdgMoR'Ϧb,yfxӢ/bXc3ΆY3 )FhQV@2n60ˡ\$Ȯ>T3^T⃀F7giy9A59*:{t^_H g\T>60fLT/au?RI:強 pCwUh0,tGE%noS)݌RйX?vαm}ńÊ mHB#,"[Z\$yU$=L68ǷO&ōkt= jl4}ϙP1۞QHA[7*UōRn@ Z9 ֜t4 sإpd +G^r#Ue7Fk>>wCe(l]м!KJQJLSOyN[$R{!H6O‰ӌP &bQ8 v>9VʍB&d1srz7RQ:1>` IkuaJN>g!X޹R$X9{Rf;wmR oѠ˻@)[kb'pSPTc:|:i_ŞsO%:"876WyqQC㉞Oy/̟nO R5t@{e͵;;' PѶ%iaYYsRuNbo^1 ,\NL4pd:@@d!Gm7 qig%DP-JPK^![Que6,iߒPfgySP}D(+7lxw JyHb~y9S) {".H+r펁 $u+vn>{{wHܧ5xwL[g\cCZt29wFDeȗH5;aB)#U{U1XkOʳGekt2v^}ߺ$S>~7'Og &G/F3moru,򕖇q.T,D<"umD{pv<ogzR2N*c FOBň%DHIGL0),hČHp(Dg~ᡆ f>ᗟ.yzy;̿ϋ_../BBZ-Xel"M+R4b1o.lC* d^^`!J<^atb)A<3P'A̩ xWDd[&\mus.ɫ.KrFJ1iy ~So2j`/qς2 ,z|{loF do/.;h32s(JDkbrqD3[fIQ9h;tU$;5Zyt PDUJs1ˢS54ж0bm-CŖN1V,mXX,/?&DZ8Hu`-Oϩiw'@ȫAUqSlj0EzQ+$Ac`kc1q1BTxM,#c\vjx f4NpnI,3g2fN0Oƌ;bD\{-< IH3Fbbc )nlMC ct mJ؊gNd?:3 q.d^vhN.2>=}^?7~';xvdF^s{A\ ڎR<@k-P' PuuEBi[z=;(U{ٻ`ڊ:3z9e)t=7'_,W#eu<;fǼpJMhl6%e˗TU6Ჱq@/T*oChc{ZP mUyq"&jHhklMeӨٔ6ŕm;[2-]  D; 3~)L9X!gV0&e()C5AJ05'Z<PJA8j(„[_Sy.EΛ_/ r0@”&ѲD@-*Wio2;Nɻswnr8#zOY -q5tH\A Nܓz|%(O."W4_)%] 1Ñ&NM)\<_'zѧf.Oߩ[|p`׎44<ݳKK89xX{}//biCaFi:~-?=`N~(8stֽP1a68Qi}񢉰1Ksl Ԧ#] MlDʙ&J )b*/ZZ*f4Q`D]Ȧ^ce>ӧ E8M-qՌt`z2~zExhK/~I.5zGhJ 5_ $uc*o  "cQh"zh b D(Up2\V`\8pČTzTfo|})F*bHTG`601pH">-s 2ؼm[b5C%%j!:7Z! q)&O FiSTr96k(&]vR:wŏB(rPVi=Ung%QQ{UƤ"@(I.2Oיal?SoV.D/7ɆӋ2 ~yy}gJU?n:Г(uAK&@Qgr<*Uf 'a= n2cxIdh ТUqb,('L***.q+8M6*(6NAdÝ7ywgIw_I~wM.X R5;{y0VTNc.l{zgufi.fVu~Z7(1&0&AUgV![/b? ߕ˲Z1 Q Q6$Ox), f0,~AHJ8ҌƱϑ/4NiP*6ޕFrb-2hdzрfh9nwVbUeFFFƁڂ+-`,l]OiQ="]wwnY߹Oww|plU 8ۣS9E!UEEW}WT.jw.=K+>Kz8TL4a(Y$  !pA +iNldA[h0l~t|:F<%=/!OV5BQ\E^{L;Hu_/3T_s4pv7435ܕ@K]8W9.y?Y"%~ϼ;vmD0soēc+ W ;gayAi"t99,qm a*Rɂ$QB͊q^BpLHz Uw)0FU 6Rv;V4!nǠ!# cɆxwqN4N4NP%դ9IְpӪ xA 905]7! xBE%4 BAKJ6TiCŽ!8.'Skvp ҃wn;8@O 'ݍ'Rk]744 u&E!Emp QD͇[ȭUݕa8}sěf6=Wq>1J,ɩzC2h"%?L*Y*>\:ev6ƝɼNzWŖhuXY7-V`إ=vuB0+^Pšê!)x[V)R2dk92UjVF3' YɧOAU_UT!uخ]TА6-͡e>g㈉`\\&c)㰸|. 
1˵؍+J.7L FN#=8Jݓ_yͭ˳/͵}"ղ\̾ᜰ;Ƹ֏!t opqLSՓ{3 194Lތizfzo݈UotnNdQ^эPW^6ȭqȶ^?JZw/n&_t612uWDtw- ¨Kӊؓ͜\c>N^4%lSMzq)C:NgĦ4Pi4S&ui8|٥FuXvhsBCJ8-fRERHɯ'Qn%VѦ 14*TY@t8} !r|!L+u*y^{m:g]6YE]t 40S42DĐ*ygBQdISq_'}$- Zcf3_5-2:MY/u9k ڵTJ8*04>q_ wdTmUA||s6'!⤘IJ*) EJ* SzQ,8Kc\T",9cR'.x~pGnZ~.fCF)Wi&YpZ%KvJ9Pgߗg f<>RǞƗgͰ]fAܪ޴P 7Y"FXiPv|\TiRy4 &`dp ۩M,ąG'm[!6+U"d4tMS_ISD.-ssF!cYSqKQ݂R $SyYksް&wݬ3r"A9+@[9Ͱ2 hfY,!, Ac=jlP1xϻB&8^ idumr SgC<\ ӂAtlWCxxG[^uyWmhCrIDDH1fxjԩDVxF)5o4cVkdD]2GT%)l9R{0Gj&X̹`F%72GfNq\R\ɽwd9{h*.i<ޚ Ӣ?de+Ǘ:C6TqTY:Ʋȩk~:~ P:}zpJWQj*PAZ>8eDt2ĔZ>nhʱ༭ah q rk-WWhkC#5 Ν+t>PЈfYv6N3v]4?[geweBxYzppgIM) esCn׏*QqM~ 9+у8 2#>WL6iz.esp@E8 QWJ \U٘ |i3($yƓwY99bM܂8z2 ÀJZѤz\IKzlzbl1P%q͎m'K mv|oHB0AvΆm C|XZZO=y3\R,i2bUETMK@$Y ju ><ڡ%btT#>kHs(-6B[s"D유X/o\tc4FiRe;ަUܳvS,H=q$6A\]l.*bwf4ٶp!WOx%}cvnW5qޘBTNy]XL"Th:UH0r8vIcbQ5CK>|5?6050U"옸Ŏ!Y/v೘(59֤&NN!MֶGÉ$RݟSIm@ݐP?,M?d%[v'Gs:Dц-UO Zoϳltʟk6l%$,mGfP9‹ۻ+_(y0b(/ڜImLlx#idzn10]ﻧ3X >8״c>NGF:A' cV="J.;:u<TrUC-xUb@aABk/R 9=OQTa5Yc1 M)xA^-c]>%wyAX}-cD㐄Z \j\YXVqKg~ +|8_NZR>?^]76:/ m %c:J~zOZt◱VIFSʽؚp{K$xxE@n>ߔ׏C-+mD}C IAj,B h.Md`Pxh9o]ixw~A4=|fG ̧!%ё ~iLg|sQ"ho@Vꏎ 5l W:>^_Fٯcq?58R ' U$_N M`eQɸ/m䣔9wIŖo՛;',l@T '`ˎRĠ󉍬t4#neQѾ;2t,ͷ#hY%j/);y&lT5ڽ7Udz"<7vO$Cv g-Y vrlgc6˿[ń5+-h UHb-t$p\¯_kI՞uޘF \7h0hǓ:3B vTr!{Ι;ﶱK+ΰ<]f6;݂:n~DB5xJN^^OYN%dj͐6mSIJǒ+:dtAr%xGĖĞT>\6Z1!hz7$S_<ZPn ha0<*{ !{洑 9'Y0 Uy:lj{wkkrICpƛz8!$Fn_aRKC.-eFr ",zעD!\5 gu/?'$Sl0/| m*O^jܿ&4*LQ﫠ym2y ?%'euش|)/wLkb:zP1)eꦎׇ|0)9|-XbI.?[!CG<^U4!XKl4֥H°uH]!5OK/x+›IGm€+^5k?޴a𙲂 808 "Z VM?ֿzO}8G=Oz;S#Cz0ܕXR!7%d>ѺS͏C!Td̈́b$`XV3sO$\4ijO{{>I F̂ۨץFRM5L) Z+$r\RN8akI`VM(iסN=gd7>kA-2c古 )DQA]l,C){Vm 57톎6"3}^0 UG륑ŝU8V}"<'!4-/:; ec ?*Հ" EJ ؍(A+MchH!AcP,A s.?FF4|gxˑCVGŠuP (T*Ŗ!1bKu7kAM=q]EC}5Ϝ@ke?Vk\i rF40טxNdY4_+-ʌP(()'B珪<׼Rzmp#]a,;J:&91PB@EhS@(daQ "2Zbld8[JF06.\ZCBtdSF2٨$㍭瓽ec`8,pN.t9Xs@hrV5+EM28PKSW1o_؝b(0K#|a"- U R&!4v(%@1X0Pr̉>YBK“f<"z$h19J>,W=>(dѰ7q+lWWFƢ)0hu;Lh`ܤ*'U b}fP<`/-c?3<1aK 2R؆7?3It Ufo;Gn^8^#Ex5Hw;f&Un*٘uUܤZtB+L1` fueN'G Ty&rY“.6뀇x j*=j]~eWsoW % "xRd*.e?[@aY1cSlas5X73IUKns*XK' vJn .A,s2n 6V]zgqԺЋB`q̡vn?ØM@hhOIlTmM\A~?^G]=;xي#hVZTMS en%nnu" mbdjy]+ n$\`sQk!vrb߷NɇM!);5nU*fs*mSPnQkphKMvK0lJKzWNzyleFR1v?o]z6Oa/ cAqD¸8b6Fmt7Z E5Έ؟Yx)㌟˝#b\9bۈDD\;$Nqb&3t"3"X63I|4r"(h>4xjXiuK.!ݽ4Cwl%O|7:" ;~sǃ3Kd/߿>"|Y.Nx9EbMϯ öo$!Y/taپqR"@VX@w4N8*e, J /z-ğ/M~H" 8P@.CRJtUX̍a_ 3}E;쏎"n"rQxG IGS-(I$M=J9 :qEDzWOl:AV>+0c{K"5Ȧ ͆*oze9aLbFpeiUhl/>87tf^hl=Uӧ6Yh9>iLISC_g85uLۏODہ{]ьqCu׷]uQxfݸZ@տ;zm9>+e 3"8Yu9Kw`PL)Ffy3J(A3(I7Q ףV懃w`*Ү ɯvxLvJ/Zi?7>L}?8|a['o'_^|rr~v 98Gv| 0Qj$},vtW///J_>:9-Y< wO03u~{x}KϏaʏw~ yabU=8OY,ZR4 SqP_$e)VP|?><*G[ΚkߝKo1R~b߯v\7^]j V? 0j[ȿd ?Q7~ۓ`4Kqp}=%9ǣu&?1?x+SB _$CӎEagȸʓ^^r 3 i?[ 8TgCH*GP(+Xp#YY"A@19R`7qX6G, ZNzȬ",id MoZ, A2t,B-XHDN@ι'ѳWM֨|\ ) WJQ|c Hz_Vd ibVmziq&`!K5c~j4~-~tW_*SFo ts_~MT&׮)4kTMn)]S&,M9 /4"J6o#3hkx&:j4 M ϘkƬ)}O)>R;~{g^=t;$ض,2AǰQ֡ 8`t[e bgU! 
(ڊI\6erlA_qejT9 3+"_hySqV֘EIC>Q522X\a,ƊVGqh|qb}HFB'VÚGrI ` Q,R:#s8:ŘI$ HN9F9㰊RPE|YnPd\[i@rEd%J[<d-&'s#ɰS!d97Z(t 9 "a.CE1_xtFn`B-is,K=%01:;E1wHAǴ#%( ?`덲DVm@.Dm;ńsU:I%5DE kXgCX8.bc1ֈB<oVLG`%ەyR3Kev$Bci1 #N NcEHU4YHB`k(7q'eTpF fxCXu( !!@YD)KH(c€Ӹr԰+b*fW£DTY~”N YvrCUag[}I-?`U.s}c6Ź 7S~&#]ƾWcO2y9(_ggJ{6_x3:nQ4;iQ$f,K*%9q7CR-kzQ$ZF;r̝{ˀ@wW K~Nkm2Znf5"|{CXgpfu=mb7fm2Uy3 ;vd.YٹFNiC5mb5l`Cp b@QR2b1+V9S)*G+W:覓+HY,?f%)᰸kAn^Ut(p*cwllXneV=6k?c`Y0&/ZWPhj$5*Α %[]sܙ]!&R2M YcdKcvIV1}w*&뢰oSk< HY986 rpj B`QEۜVQ@wi\׺VA ջzY*8垶65;otj+-^t*Z/%|AvP~lbŘBe&˲ f<G#XӐ.ΰ59=g: ;Kbv U/Y-y Ȱc 1jT⺍G7-gfќ|DINaMn)oR d9%ż닷ְ?N@K7c 9)(a}.',yu;Y<'|&'z&٫W y1'$h7nO Ur^zv?#$w;KN'5s|P^D}m) 7CJ3wס\a|{{ C¹=-(*Eu;.4 e~*6~3 0UkʜS ׌PH;_bQZnZIS"O[ڐr˗n.'b&  Ni)@ k k%IsuC?LٓzhdGx|{,̙>i/WBl!qɮsI3F3|W0',}nN޼:۝yNͿyn M)ql7jA.((q^BF3 ]xGӹ~Wl{Wȕc: ZwZw e וpԠ4>fM˅b]r*?ňNQ}X:$dsFcU=fH[%C\Q0>sBYlbH̐u..ۙ<>!1yUr0ވ%{0؂I=DyF_"2ebaBboͬ`C,Fc[ mXlk:ցZ#` lLڂpN%J~)^XT3'nrdMI/OKCot2c`ćvp[Zhh% R 8"ѻp0 pǔq?Pª>L+G?hӀz}gcVZއn4P* `mχ`XV_ ͕ƃ~z7sea*w7(%>w_☋cZ go/8+FmB(G(ta:!t/|rQlbY\aV1yYFѴEmFi;-~阅g(ޝHqEMXY&I菵q*t#Z kI9orQ(sۇ2*$ fG[*w 9|vRӛ}>L׻DϖEN ,e$ςV.gy ʉGK-0% SKkÔn͓`ZsUgsjr: f`˥`!,E]0+>*8m탦c=" g9 dAcMuT^3*FoEZYvۧ2B7{J(Bru12Y;:mkInǣX}n5-B֡N+>FCʖ1P +Rt<,l޴%S<0e38!t<."QHBQNBV>bȧs "!"$\%3D~H1@0*@CsWJjvR{.C ,Wr!c1& <@q G8 zSI&o#o>0cwT/DMR:æ`޵[agOo]~h_kmuT5)C[)ztpЍ}=A>ǣy͏jh^F,5Tw0cC&1@JVw(v]@+E>! y>X0}!gJq>{1bd>d8؈U/#)7_ZY%ӦJm6}R" (m\peB1l7AX<_EhߘIQAX aCl;v(h2:a]8aXYªqRA!˛Ut- }jD\kC 6m*5^n6̼vM&f2~i:i\^[A8{?xsЩ {_F\;}JGp?X-=*UrG^&zvqJ"$ja1D\'z׻s#^tŽRt;5\)JhxYr;wxdoݮ{3 G7#w-)ǣ?ɽ"Ut88mGG76ׯn~|{nwv^^Ww:~v~O|_g]ߣ00L^]$i__hxEϻn;<~N6u.Px:mM8׿S9qp]o.{ݧ(Gi9sʝ]H|66@rld?Ӻ3<^BZ.0;kІP 5{t`wR(|&a;Rž^4![$Le3E߱q{9%g&RK0siM{JEkVc9OTQptMuAp-Z'6kcw5jɼ鹣PߣCXN} gٻa'1>OFtܼl ʛ~7:D0 RţDInn< &UgTHƋqorΆR@~Mۦ?I(̖nyZPM30ӊ~NA.FOAOn(jUN,y'=);Kg߾+tLm:]Ҷ hݺ;fA[g^~Rƃ"I yF^.mHRq/ӝiQ)ǂoƄå;'#`$KN!ئӪBUS30S KLo{h щ;s>T/z̙W %&l[q/8P'fvׇtU&bxVuȗ`o퇌 UvkfV5ޤJϹfyW5ޤe'/9Dӝ'MX`$SK{OS}ZOӟf iͷ{KL蘾CfnDŽ 6ww_5_|;wV*Klt"j؉1Bm "FEBҞ?}*!"Dj cJrcAĝ/~1(%Cڮ #dJd1[|8{JС~ʠ$E/%,z7!|/TS sPG p"$$~w.tn0*F8{p׈F_X7bdpHCoy}_8GmˈUiwO)ވ%[dAӶNPZ6/SN$B箟h eF8j' dFݏl"?Ơi11HAs"d|3֎^6>7`( 1'aul +O}W~PMWKĉDMVHv$߂VtɊA>hS̡EcsT0Ko.$Xrw<ۮvt0rl*}W ⢜y;1&x@rN39~miI?a^X |鄑.⒳} iT驛ufBo^#W}#]w}IF<"/piJK>F Bb J |U3:` 95\,gWqRLu~R7t-i|ʝnvMOb99Q C&+I]*"|9".|~bLӶIvA )FbN%XUaMJ*]%5 ?dn:s C 1s"5G L] WHq\_r)0 KH!)Uq~B6łBgjavF}g~Kֻ̒e6ҭ`)D[tח*N1r1A)դ@X"KuwoR~@șUwoۭ(N7+mIu^<3M?jZr):UPy+AR$e&2X0vZBxSΊd"h<8bFJ: 0M޹ȝb;|t9U)bЙaIV;P{:?w Se2V5۽kci+o6" ]%䱐QyX +]1"HS?7ʢd9I=rkV<-UQ+%Xx9aI )Q uxIr"5C*209b^΀?0V{xM2 ~ct׿cmga2SE*)3ZNj)H5UC 7r =`DحK0D2岪SaXVJo+-ȶ ]`Qդ2@6cd ,V*;1 :Ƈ4-=\OVW9%Y:+ VH }'sÂȓc*H:@ʴaK| ѐSWK=߶:QDT#иcTu{,zJPeYrЀ|  rwXOaq.;C-H!-Z93'Y); Ql$[@!-9d >4cS *pɤڴJҧMUuoCF[*Ql#ڧ}teu)=:`e@b"pZU3=05$aS"b͟B62ks+C^MǜDHd Ydb6 /3ywh;',ykhAO`L$C e!Ha`Nɲ/;Z|-dF#8̵m~J|=7Yo{7{ShOX;j{ܽQF?>v GSeY`~kk@x1Cn K| K'7`n<7hg3S}ٗLŌfo]\}X*gz^'>i?y_K۫^.w^u 30dz޻nj|Ncip1JF#Рu{--4XŌf0aq\NdRBYLB`, KtB"}زrFuAGA]o]XQ,G~zm5Z>%Č%v@. 
,YB}AegZe\NQ 37-nب`uٞj1In'\\kx55(ǵ8lJ=ϰs Ow'iUS7􏦵8hJ׈rmM/pl{ .ևqjo+Bz0 > W<%_0=]*mf 2Zze!IrjHE6hPmll{h)' ;Xw+T+AM/>IMg=eFX:s:_nJŠʛџnX25dԦWw,mcFZ` ޟ^8ۙ>bQ^<<^i}):MH<rف]PvK`pԙ8cu)f3n7HB~":H u:_n$46=uAM3hSf݆gy{Z]H\DȔdrV/\C 3uPM VXxFV /M^HZxzG2~g`mۀvR|E) `S,̖3X͒5W={uEps;R9]V!狔a{쭉|iD7bҭjN77W?ΖOw[T@@a"UT2J飘5 gyCk_=)iI=ygbdg{iFKISfIc%zI^X.xʌN[9wt`b5m@DNa,}|PxjAݲT&"'i$N^ J%="dfXˆa>r0:G q̑zfZUe} Zoe;;}8ʓMo^}v74i|xUv>]DXNqr)"1 ԞG\} 2&k` k٬iP ,yGnZ1&:IK.;ʖ6n-%6H Hj4؍EgcC)'V,0DErL "9G'#^Ŀz}y«W=08\iu:7S%ڲ&H.(E`NBIt${~p`4;Ofy&KIki<fR5#aH}B,6pl{[~vKߚcHXqLeE}rJxF\(dhm@O/&!-y^R%y^RT frUqO*kmfĬ-DQz\^ X}])RaY`0ףy;J yۏi\$~yvE>r:&B\\}JDڜᅒL<s+CEQxjzB2)|C Q m&M iz s` 7JŰ6g؞e= ZV46?OW7e⸝ęмQ ru1ƻ4Lwv̿8^|k^ߟ I9/I9o&AE[1` fL!{J]Fϣފ,!Fə Lul$g%K+=. >1f=qʎX#Sb9{<:ȂtĐECӨn\a )Q8-RJg+*,"wHJjg ɋYSEGJf'p.;__J9**Zmo< z\YB)-b7lbu9d ͹bm9DqIrȂ3[jlk Jva.n?~p=mi^|BwB('\\)W5kDkNhovDt]YsG+z }(B13+eePTA& ccfAqmM/23;5ؘNʚ_(ȅ*\L_|WQLXH rGmn܀WёY9\fNt1eǨj`],'+@>Bα8^yo'?7ǽŒ/L<+LocO>܌ů A\YC=OҶz_4BQ*QےFO*U৔T Y q)1nїE.wx0-J- >h?^*8bpZDZB{tiʼ`lWW]۰~ik$ \zб 5vsf! dYш m.N^*Q`HE[b>M?' fE3ȫ4\|wlWݙ딗MLRT:0I|D͂::_4LY075wvsVIXxF!gZ̾؆'ꮽUNŸMs1T+) z,ÞSOI 1:%M(F})#%Fά0. ̙y=S) ayIa@ "+!Y屌'"bISKjx'AI SC6σ&΍ミ2;6kv ^"TT΃bmԓW&VS\Z\J5U þU-MܬNm,g-訾\$l,ǹU06HH 1SR="nؚЕh*RGIѳ0uYEH`Ǎtz*8ò騞Y#9[s` $/5Cri=QKTI R ! 1at*(Fh8Gp8 ƪ8myc%$m0ٜR쳵j w*7^ӷ5+H.O/'Z#`ׯ2 ڂ%UO;,5B@J]9>2VfAƚh^ eU]m!L#0^l6G({Br5h-Hv&U(ݤ**)ƲBRRͬv?ǥfѵGr~Tt#C9"zP?^Ewd?=c_"r$ ZԫbQ%ڂe2u$!dd֒bķ{uet bB9 8Lj/5?xdOC4pDsR>Yk1!@`APL!T9{|$ɕCF.lDcQ a7#0X]Ց%X ')k^Cj l|,v.Xh$b7YeQ~|tAr峷//X`Y"Ѹ,T8f-E?/9Z[q{kj0xvCui֕K0}|뱠+%?L`0?M6r%D_(կ&%^r`nW,v1cye,vlF?^SfI+hlќzl)լrܙ,*XH`:e+2J!FKayA B5'B떾rn/;t)es^I%WC,%ע#uժ$NZA4YJ=kX檔x;YDTɢ`w9#޲t Cܸm,}9E@gn'8߮c1^wt[un>_vt]J|"d,׽~0~Bco "<=T 74UY,fW>E)]*;k *MR9[M NgTntg& yw֭@[3+')*fm6>CNՂvƓND/1GI:#pz+5[c['&$NCZINbΧ&-cQۂ1_^=pEБ</)1 ysW ͋WSѝݿ" l9N_3WW0K{o}GĢ0!Go^#ɇ@ȅ@Ϳ[VS_B Xa0`d 1+lLW pOîڅ7]D"%Në2 Ygżĸ,P̕e J˘*w\6`iN=y` X"oy, V{S5Qΰ坣H1`9m3( ^JDKrjdJC'FTqy`oll *h>G3mLx[Nʝ#FW)R*AbO5VYY@8\ ds ,1JK&r`̀RTˌl Jj_{t'#)n4b1GZ쫏&}2V\H/JHh*Y/v|[%~%8QzE⮽9 %(mk`@ BY\!MHyVkx[DmЬCTIFy2~X3?o%4ժMk$i,YĴ5F *ڙ>UAZ!U}Q,!ng"x3f5ݟ:|J,nQS{h0}lU 9{yzhzT#Ћ eVI)$< ({ޗ$UN|q:aK^rC*G&vV JQ́g\8p#9!y-E'{ l ;!!Mqb}!{BEPm_Ixٶ"uGIE=pcq_s3 1,:+WqTèl||Q+j!e'ybXF8Œʉ6jZ9pE&CPx)JYwY^x^5TKdWnj[12c^OlD8Avpb+akS] 0x/h*I ijZRX+I]KfG겯~LӂX&v"%gݷٯ{ے?\-Vjc \2fS;љ_MS\ m  ~&! AA$[_>j/Zp aT+1׾[73Ǫ$+xф (ЀrM{/,QT(n8U3) 8R֝c:J/hBvA{X#)0: "@|)ҞhaQ|O*Q[qKk($0C&c+ߵ_%7KXBX{S*$VZRv̉}9JER9d5U  h$Ļ@{b,Ai6 9B$Qj2YkZr,X8f:"|9v9/On a>3CP[%OӛO&'d\1J|Q\{Fۑu]!< ږk2אy[yFt˙܃e.8N6sغTvDkq 'ySXıqF- n+-^-!P#ۯ`Dy4a*IiZEsi p/Xa̬!sɛ Rxq ?/l-O}aZA^kD㜻ƒg=& +m9_ۇ20y A&ŀnڵ$${f6ue5ERb}&]]u.$\/KLlڬyZтzOpAEI=FW|"z(ځPQ*¸S Qp۳#%[](n'y2e^OJQ+zԓ|- ͉Le ДcUstjwTQ[K)Q`KaEꅖ;߂r2} w̕싙h{s/j<:2Z9u*UǕX{^)5PJb2AJIm` Cdg/(gKeC74l}}]xӣ؞W%Do i&Uh : z ڴFYL$S}}&m->Z]bտ-d74P>׋ Aq&m7rҽF=*P\o=c:ԘT@epa4 ΰ6D\1cWJ{IV Ht)[F,DyS.Hek*\[QOFMv^0KoϦ]}pWP2Iұ9K 2IS)",%f*&uH)BAWrh+,ZY‚R vtcf4EC\ӻ=:8P$[7}O;w>c[nneaHa0q찛 nonb*ӹ7\Kϙ'!*Wq椳,9mҔgiF>N'`8GwpZT5́iRoC1S*9L&dF8>WG%*'^+FU^ =4 4Ρp&! Xpr,*͢R}4=S&ۥ}C/v{a|m挶ЕEJͥӁ ]K /i 2^ǗI -g*o;mYX/n4rc52%`Bן@Oo;"%Cw4{"3cqIE?/Pb B T<(yQ?83Ƹ orZ^/Y =%~92{:RP|O8Ѩ{MҎBzB ,3h-.TDҪ Ta᷅l䕒;ee>,4j2x7BJ+tKs`Pǧ 75^|8uf}^zCGYY_36K_{ВF\BmUL DrN8P/˗2Ċ 6<7ɿ*OjP#Tp6ٖbm?P;; 'OV}Bς'&I7 8oDQ')u<;5]Hy$I =^C>_ۊap5^{o㸚IJGrJŧDNc F혣r%ᠩ$Ig[t״@RC&m$mՂQlЂGK>3"m͕'[^j56g?''l^%?OK.o\J٘ڹxuZӪ~q˳SZ˗gx 89")"ɢhPqlD^A"ɝD!nr<h,d0&Aw%8 }Jɽ@ ui@j`ſ B7ƹ, PՑguHk ֵ8[/qI(SҕEQzVS*hF@|M"M!Ug&p9|Fx]x>%3 kj TmǧɉU?WLt|Ļ lhCH9>G$TD4(P͗3l+ .p(eCG Zif9~iy\)u I48,xnj2eu֓]OЏۤWst.8yLql,h@s1( y CFr<. 
&G%{LYu"r#B]bLr@xKK$g ДքOYESAAS`m`*_xy1m9Ng!PpLIŤz5.#Qe.-YF' T %HKڏ7JpOɆ&afzeR o `%C\KB[]SEmmbۖLnseRoqI.,sZm}S_f_BЬ:Rl͹l:<[DygQ)c堕]Q#tk OR l]쉚%liZQyJ6HU˖8=?Q"-ufeG֭%2OPp'%'8́KmO\mKIza>%QkQ^GJD} &2pdxQ9&ڠU#/]j%=}r T̬(V'}7;>Чhti4շOa8O[W\%_m>r%β5D,!`Bss2+ȉ^RCݞrfbX_q/=aj}r6ORԜĤNh[!Nz,Mdp{sBτ(5T>Vs) xKkRԶ8yw쌲\KzJb*9S%UXmCau|A1}v+kL_՟_Fu۸U8~>>MJe:2R s*82KC9!S1HӫlEgPbyƕT kꉲ3iR˭a S"\zYTjpR | ]#|oD J'`9y= g'ՑQѹyM-mp##Gų|V}mRO\< g'uRQYSi @@f$jUYo Uohww0EtK$Q).@\PpdԌL3%)KƋa^9\,8)Rk xJ5yc"f7&kM`)5ߝ F,jChve}k͋(s[/ ]vT|gԴ]ƒ*Ԣ{$QHU̡MTa ܂Xjb QfLM&̬'p̐2qн[4` 8?)pLrM0qL]&_f bf$x>">sˆh=x9XłQuj #uÁoLDL}tiؼtLE!kZf<|lspa; /34q n{O1kneo}}B&=,w3. ,{rEXh'PhlO7 yhV>.~W]TN}ng".=K Q_]\t0/e/ӫsکWO9;ـZqn?_#5,/(=X;IN˰vw/{un]_TYcHE^A)l.Ju>~-.Cx9B3x3xkBS{2&J,&^,q֐$R[JtsFZ*(Dl{K0?&f @x[r+9S%y%ys=a2v9Ѥс)^};(#?“쑙EzNyKy ՘N/,5!x>\,>7cZ)WȪ=,WV(\:2% IC <?J* LHim+&`.\kb1<:ؗJ[.,Z$--Î!"CHGH!'a d 3|.l$:\DMEIʩӚࢄkR!6:ePURࢋ`7 WkY;,LASXQriBPgA;ӀCeYp洜ɩmIp8 )q %"FN,&Avy_7Z`јȖn*ت ʸ&2aak#h.&Fah4Cc"u!g~(*CƂ;e$$Fґ@p (rXę1+%X9e`1裲/(&fIzM.A2ْʹ5DIL"QD`/As )ZjTFD'MYraлx^x~e9zD35zxRK۫2nTcm3}OqbEqϋX2ZQ\ g0ϔ_7tr`{̐I! xWG:5 e y  ٕ蚅ǟ^d_V92ψx<;ȯ{'y,S" s@7ǀaR8]k6ΝO[Үۓuc||E/wxt(aޕ]XBݍ!<]~ۻWܲ,,Ͽ'߿}xؠ~O0W1Dd- ;)~ frGcњ >/PSL`8}'8oo1KiyQ̒et㞗*)@KXG4;3e`|(rptOo'PK*XaL؈AEpJXcHҐHj M[n&WMDGwҐ+[hЎ8nTpx3,% QAC4ZT:֢IA#) Q~$U. O,3whP S8![-H0Vq?҃3yU[Պ=ljYhkN/mg6^漱 w/bAdߌIr>NAP[T{nn>xdoC$eocoO^eN^]c)nGaIEF1 l:2_DXMPT(ۧZӖ2粎Z0OA8ǐE;K aϡQXԂx氶Le91h@8\RV?^IG:{+M5`E*%]f =y@Jv?4z6`eNpOi9\I{|7MȘuW$bɵg K [y;?seDV?U8g9Z,g*T#9q{L)J^%%C(OuN\O)vf|*!.-=vrLT ֩"bW󪑀tsQM;L\Xqك+4J;p.\s߻&uG}nVM()O.Oü]J^QvTm~-[wSmŽE.v JYY?EehKhK Ep("6W)$A7eZ7:>J#qK2qhBr㎏)dƕO_OF[q58ʩ"xzwng3f$y3or)R:jL ~ٔ5٦9/DDoU鄷Ⱦ>?G>09CN:lF78 Er܏۔=/?kEw*a'Z6ʕq -#˶@[yYC94XgtW=Gl9ZKnV=GLVuCnO,6K?Cz趚QgƭZ~a詸҈E=,oGr QCN[p4^NAP)8Z{H Зe v8%S>1LNNcKHr,;;>}W>;ٞДbѺQ5zܙBJ0W# ^3AnoF$K_#ʰRge%CΓ[Z!5#{ LΣsp =L_S?ܭ-ka0mn+z?Eљ-zszsvaI/XG]z3ғ.;"5 X냕ܤVFHE=mP;G־`ʼdR (#A2 H901e !p05BJ4c'O8CDR^Li#dKwIhL:(w(9HBL16hPF*Uy`-lK9ĈhU);24B'ȟ^A:@A=Aj˅6פ3n'TktQP͹;:,BG/ML7OX%"x!Zi5rZŅ8R'q$Cj:kbW(ƌ.$M4Kr,j% -9nP&GEr`QJ!ϰMxNYU](o]ɩfŐUՑi@Bk,dlo^@xwo&Cwa҄ 0T^+wp jmT}=Z͏ÚG&z'} $99 f_ =PҷG6GVT=.F1u0GL#+=2 Kb&j^[n~c*I1'6YP>rɨ`MGtsRW qJkc粇IlTʬjuJEՔ|*@t nQLւb,NSLO=|^+yI:Z?bi$ %H-aBۙsD֚)UGc]Z Z￴@K_Z"|HILܹ_J]#M5wiRL)T1Dx88nA}&́CIʳ.6_{@"suS]OM$wLe5xJ'L'*( (j$/#4RJSIcFTIJ#-Ŧ9rbQF'texdoCߤsƀ7˱L^!Z|^BsyÁS7y1r[53Q= kֆo*ϯ^:fJaR 4P".5aKrЮ(x 84>8Y1AbZI+ z,qӊZa =KYS =(^i}Ld:g.8A J8Nh K'pM0V.$)lJA]Յ=P.nwOx Nهw(!<%aQCAȜj%\ךL\ahk@/RQ=e)#" D(&@ a#Phphf-jc}uݭK=^`aX^ól"x>*2^/43ftX-WUjпRr=vdwׯJ f+1͍~$q"f#X^f T̄בyE( #=UF-c1[@&uMrCT:@ `5:3[(V(t4`H0(?Orb6kB#^r9TŠ$.kv`ݩ9aATQs$ZS,d<@_c(Z5Kn8y) *ę‚ tK-Gl ɀ l"=C UMywmI_! Ce8]$HXfL IY-_5_Ci3C aO]պr٢R=,lb&6 ع&'jox[ҊZ %^n)̴(Zyjm# T\G폓B)Qך͉FeW1 baT( ~*E.XΝ֌vämW^1[g9^b^ER Py(Q0sǙ}{c?QE=̡sioOߔ5_O9 j/]'Fyz99 AyyKP]DZ]p/;+7ϡZ\:gr߱rG#O$H2S8< ).84sGvţg_4{k4,>K?^ uI i3lN8jr M qLCK Zg׹K @窿dqƵUX_N;*.y?}@ 5H_?5. 
W@2pё%: :(9"G'ӆ&Y{ AQ9way#~]2\V t"I{b[ چdM"QA%Ix+w.ZuAK"0@AFWdzZ]^o#⁓@qdh5*pAƐjƒ6,j5j IdLD+ Nb^,!w}1ID yt@j=Q'x{eقmj7vK8PZ2Q]ę0Mz'8+ ?Dk jJ#U# JZ@.LD#a&R!fY'{jPI!̨'f^i޶l uݸ<ՍowOGgGuc e|D>qLW^)-&X 8WT+BovX=0ΎģSZRqk?p]hNj똴=EMŐ2`+o{8]@tң/UǷ#"nY׋=BV`R0 (' u,"k>8l4Xd:-SpLFgxGBɣ)'bj{%'㌎n0/ìcbrSZC¥s\7 * 4'5452eUL֑($L 4hs <|å Ygp7u\nRÚ& cxn`aA[:XyEC.#>z~r4w%i 6UkmX=>QM_pݾ +2 krT zBѓK Fv3 }yyaL5 P.ik$8O64k Ca I"(wI'ywJι܀HEűC5 R$R Dߣt*=\W0"oz#Q>Fx(?A!A>LPoJeɕ{>Bq,Bnq{xsX|ejH?Z+_frzFvpP]}("I]sK݉*ZEPI'h(ЦY9z@bPN C>]ƌZYg3±z]sz1DU_{P&*5ǃ# HJEVk;wk!*JpJב WLDwoշaQ2i8ݎ@_+QU@3dunno͏ȈBd|/`n[Pk̦.r+﷣YjNv3:3b25U[=4~ڔMt6@} 44Dr"Z- =#T4f[Iݝi=ߊxKkNlpr2t1K;43ƃBe -&E@w1UjLdN}c-İИ :w0R&E,#dp"'%AKK蠠w/SP<И=kۛkWj֜Lc `;SgJ) pF8(Qf)+]o(FøxeGv_c(-ڠC5?>ب\T??$cL8d/:`IiFѢvA!@|ΕЌCV`p%Uj[|jI+j6 3 N FJǒ~eieNJQ>}.s CSRKKdv+g;NEq5arYyr6&a 6: .e靳cV ًnfg~pvq7nYZ˦51YGYj!E(|9yg I[ gϻFq^gϵ zFxZ?П-|!%‘ħ(VFb\JSZ(j6+p-~纈 AAX$Cp¯g=}jx5Fvxؖ~mS^rZ7nCV ^om!@᰹_^ w Hdq+2VVECb.I{$1m½pK2)) E1.Q%C J%EKӿځll޷kKv^DA8cȼ4Ad1H.ysd{ڽBײۤHO_@+!j4 4Ү At?=W[s2 )3ILy)hrHBxC^+@XϼY^W\P@ D/}b.q^{ bȘH"9Đ3}!80aM(@g,gCU-D[̞Z0on*v*j8UibrwSW.f_;+NYҚ NwjWNvUK}qӠRyUrDVbh]j4}[-vAw hc &«]];K]Ӝ`em0Y,C!nvڭ, L[\/Gǵ@ȋR(c$v=?7׮|هCZq:O0+e8ߦKMFwC4%k/s"O.{~ -kI;;*vr9`ZTWcx57%o̩Sk^K_{_Ċ1U'˸= m©p%{ FXI°c-a]ĀS@Kuw *,bÖwSL×<EkFچѶ:gbmhj~ɦ%Am82_I#;_шq)G*e|7r@`/3E+p9pU]8J_NI*ZF䥀  Nn69Nj7k7wW_=)ǻz~w#bOEL-c:P>%Zu$Bb:}qzB-N)P?[U^UݛP-cTe \U`rV5ʮD&:jc9Y],9:i˄)e5,9FUvK~x"Պ%p ]`\ lku \:L6,%h2feMJ,%JΈ8G` r qe͙D\D TcV+oYXYs,x#53^'PL`^e=jYs QMO\\gISmm9gŒqBFrYh$ E"K1c>d+HAƺ3} hпY]K0p^elE ITr,AGfyWsgbCj4up9֞rB{dF{n|[$ϴYK@gQ6pA l[ƯQ0mͅV GȢD+LsgT0IjW8Z7p0!)bŰtp*2s2oW.l>>Cfz"tP~8>+'X֍`唓%˞|]Dvrc 8@x#P!ypShC˳>٨B wh+N-Xg9f:vi9gnx`g2l}%ڐk4Cr+EnyÕB3J=C^E:]}E_j[XN^nCr2m[%L!r#?<B8( 4C]|]i >Snc9IKY?_˒*1^!.O^bRU9JE#+t!IU[_r^ֿd(N o73x5vK>KÊx'Že;ʼnSwq?uLPi2ma{l3_@ )F8+5Iלq(3қJKFǗY TY][-N* .\Z-A*42#@*e J$DI3IFk]Gʈ ɠ0.SgK#1uƅ`lTr3(ȳ=w%HӇKP[ek-}ޙ!gx3)u1*ѺX! Z` ]&G絭B^2pH+הּڒgs/^5A*i?>c>#Z+t:Ma$w;R!*օ4!OvA cDh3Bogirn 1wL Հ"mQ^,!8BT:G"%FLMP5}z F^ṖY(9BQmEؘI{̎Ǒ K13Dfa5113ČBMik}Mhp,AiL[oό?Y(vȚlylj3Jjl9!>#k(H|bY["UV>ʁw7^hBCDrK]6v"D3,p2(]޲ԞibV0+;t0y!sk| #G'Cra1 PA`h4A:[*ۨ2>功 kڄ||SrJ@y pJ# JhH9eYQ +NQ-?nrC;ݛAze.в!O&c<VgCK!yt$amx}U7PV>^_1|TU}?KFY_χPCY88fplM/㥚jq>K*抒}78{ PL@2t%U 8;QNFUldı4ZMPŵġ;˜MBnn9jkĞWy=~#c:uN׵/,^I;ǔ&Y:|.vf塐tTQv|r2m}SivʢJ뭥[AthFe F#!~ܓ?{Wȍ/{m `p;L`?\͗6.֛߯ؒ[-$3ŧbv3!2-A[SEO襲@Ӂ*AEOlk >VT %EA{]DiB^Kb[)2p$Ʃ%G K30Vbg(%"o%Gw^>8K j⋙^܌ˋ/TbM\$v^S.!&3T3ޖ0M -cq-7_IR)bݜRYJ,i"ƌu]W짺K-=Ӫp, f/tDiA26#UVcAP'bK38X14  Y/FH9ì.EF( Ssj(?$X(n`m Q' ВqKGO1lYpz'AQy#cBu*ǤǷAN% ezuWf^{ MY"ͨ!wĻ]r7HwG嬻o}-}]59dLK0쎀B+(G=R@7n/3+^s/\x^#tmasӫ-=wI^<ݻww FW{WŭPgkH0&jHyרu ƭgz)8Rwx ʌo7[OoڷDK,_;i+B* _{?(q0kF[nFƬXV7WQw{7A@Hcׂz13MkL r20fPfB`,)<% " Ʉ7ވ1;T7GjR7h/M#bA{(ES 7hRD0faDb=Xj!VTJU2cF>i bN04r{l`:ƔVR04@ F,❱;6Ij&'&36KQJ#lK,y[}nR+nJP$?l$1j+jV@~DVK{K8DRzI }VVlHJQP7CS I}dMeuhK]}8rc(-io(ƍq43()kAOFa<͗7?[?}EUb".<˜ȧ qA=R#S[*mHv-R$2\{B%5 NH/gsf /=L^ )S-D4' ef$Li"1laJ( vD06fa h\γdKm 2Hΐ,LtMDܔrpp:)Ѓ D9X;+dB*05)(Dž" S|TZb0sA q%RE`ў`ě:A2ޢ Dn$^$3˅hlC y',\ N#+8 4PRAŤXKxW.¶*y5/ FIҟfOsf1{wx(x!6i4/I'S*,y6͕Hs$K8g(0kG-0{;LG/Nkn\(:-S7D #a4Ί}ȝql M1}s~n[calf0: Ƈ KYmx& "]JuU դl 0vcCFZ[boACR1x`"-yǰ2ciG0I+#?R }縛f+zk+MԣP?Ʀ×dƦӽ+p065(A\>l]SSY{M 5$m_7CU(%mq3& ` N:zeDG뮳Spd|}MXa}-Yu@dP 4P EQEZs ):Xơ\CLH"&c:VPnvמܦhN7( &1R =֕CJm:{tKaKjZH?樂"сȦ 4̷ٯ{,&,XC*L豧`ySSYuc*.F3Ր]z@|nN_=HY{ {jI"PrXv߸E"3%ρ2}ؒHwE%Son>@kqxWۃmv92 Q?x edI/kl숪a#\":jo&R(|΋/6KRx'>!GdsMjv$(כ]8GLi*Bϙ r)?X_6T$з iu տG3哆|e<4I {1xE(JL֟ p$ ¬q-]:YV=CCŒߗ6XI`Nև?LjqQ57V͠quZo-[`$Y5|0^*%9+=_ެ.v(:j0Y*«qw4z&T2>K>dA LYyLT GA~;YX yt hEPȂ^. 
{;;˗dRmіh*1ؒ&J1eH*+`c6B:XsM.xe Säe3qzyB5kLZ U5bdv5V:1cY--cOFcRя\}㒞#P>@>eR"&O6X_*"ك}Oޟx:/77]߉lT{ӗGTs)(LэͿ[fk7d=%aJ`Bn_&h+kW{Q]L0| +)%CFjTKbZW];VEɍjU\2tU_c(mKe%\aҕXiSZ%N mk9aֈ";,ޱܗL[ݷBϱw\nb穱~xRhV,L)Ǻ;Xyx%E90 t:0>n+*dP]F;;inn>c[ݞ2";O>'V.9n N|^ nu$ ],{1"60( 4+!Eڕ?5v7Kڕ P `iTQ|my66rXqa_QϿڳ 9AԤ"j/r& rTfz3u3@4.).e`,P npd@?5v ()f+k Ĩ`k3 +G"퉖98ϻ &LSQJM{/,QᄢgZܸ"̗FC-n68_HIc~֫%u~ 2=HlvWdU$h%)|6Þ>1.P=.&R模JMbܧD3GORH5n57yvvՄh͌S^ؖg$ʊ8oJDin;k=^xcА_T^`?jHcZC:;7,'~)mY|'j8@#;?/,[x25}>T08]Ip :$9}nD.:܂KFXx)eaAYX@C,\ OI}YHFdhD5!j`ݟ }lÄaDI|,;m׹P;%k \KΝFa;k rGm=bJeg7xIMBht}32$twUJD?y2w$#֝3|F<+b2A|536 Ĵk5=K'ʵY h S.ɐ׈LwlWƟ'b̦ML;kn1PjNS 9}* Vp&o1!\~%_ҕO^n<2M68.K0Fۼ>je_>8%c.~!<Ǐ^*.%ߺ[w|KR d]9 ^8u.YV7\X_,h~1ZQQ`FJN5qbC,2r" $Ac|X+Y|%z1DA$r-VGD)+ISVfAp,S̭%AqLp[+$@KK>ʸaUN{P =&oJA0 x.=PXjurs^&"TLZ jN*@0a!$;B<4tlLe,Ę%d" nL-5)" "ceFZBVSb % FRȢbM5Ysv˕.Ίo*7o[e |{5pƄ$Bv_%oov__Q*ȍx}eF1"> jc]2,ݿB Zȫ,ZMU&<>+>O T֟s ?3@Ap%$( k6w;[)6 X27MאFV^V)]S\0q̟/ـ .Sj梔`#0i ɷg4\O0 l~]OrF6TK3[3~loa~8Ay[a~D@cb!K!cP8q9Uw,Lw&A|ɩRV`_Xv;@MXCY䟲ÊuOfή7 yP 8r`H6!V -<<= #a}$| z s+LMr#]6C:ftf 荻]1ƕ_C0JѶ8#s Jw>dS[}#ukãHI\*eI$~|Cg'k/y1^]Non6,3xơzeYLp؇\,,FH{$8d,@)8| GaV mV6B&v ASnTr,OhJ8̤$Z D<"ib1*jTYt%'?oϨֹXfvV 0wnMW`U)" ?m=?Qa2Z9>]:?Nj4-m&k:azg,>]6$c$a:O@?9#4C()9cd0,O:b\ˆ[tL1" FR:X$ImEPXE?2;%F 24>G8FL*& 020TT)i,A'cP 6F[M*p"2 { ȷ)@_!r1|ٟB?g \x37pzzqr^-5Xԩ$RͶl[bof6yX>evep䒦W xwD4޾m]o2x&1ox~1O ާ3 t/i,tHM 8c,s.W$E桵VP<%{tfR-0A(HLdnӄB 7Xf.jåRcҳ-ץS0jr)nI8XR1S&"AAŒHZIb4PMiMe¥(EQ|6q:_ozsR,CYd].`p24qdDG6I,la&9pSQF)k`F :u&:Vyn u+?a`vM\צc-I{u>k֚Af7W>&ԪTpUЄO];xrȀj^y4kG(>h^eOP0HHH/xY`^ c&:{ġ ֶW'j*+BY=2Q*RķewjeQ$e})]rӺsTJ "M*n̯X9&WWj:z>ک8 Y9䪇 Uu'{W[(4%)4YP_-{{B)i nÊ!-3]д]@C׋[`<,}2a;c)X뛬!qx>+b=1dxXwU ?/i(Wk>9d ":x\?<7MvLlY$Ո#z MO`qTJ|O4i_CH$ZJ>g[XW)c)1fP5~lDnJOKXȟcf$!&Z&UdSa@4McIkʈ(5&Tq `iDeDubhL-we$$հRˡ[,-\3nT$8jMU=Ahh,.穒k"2;# Z(" ܡVU!oO,o׼}ds/],.s+5l(wG/U$cqVXYF}Ǩd4?;ٶ\aMfFktw^zxOx'.O\T  "r/۾z* l/',A2Cc1dF4mf =Ts"ZpChBUl8KLK;SmƾZlAo^˂n( M5n坉r|*SP_:nI.#M}띑fӺs.PFj%es˚UC0$ ,sl:xE$y>P5]"TJ׻ZttΘ9͔뚳k3:?KHZpq*ӭ^&r*7ϯ#V" N'큠%;Qnn._I<6.ƎS{{FȹUh}޸ 16Їu4s?}r^5~hy҇-r/Ӎ؛?XzV} yh!7= %Րu)zTݱ3I.b.9'`m  Al z"L fCArR&NI$Z 8D>̥=o p_~!zX ].VfTa!DPlœ/pE Gkz(Q't=FH6~Md4EnC9!1]ޯΌK G̚L7nGn3O>dh.ޕ6r$ٿB%}4]m`ۆ2yD Ia}#Ū@*Ve|qӛ|Y̌ޣJZdqTQsYijH*&G~fubԃRaS jxJST,֢@yztɷi;&雤dtJ!u{gnkeZ64䅫h#3=ӿf`ݚb:MQǺ=a dZa޵uk~n/Ӻ!/\E);o0IF|&- sL2}sw!Fi ,"HűAaC+slZVqew5Rx$)"`.JE ĕr.P-wTO%F(ƽh+^rWf`p7<ڄUēsߍy,PnYO,)ly B0=`2Yx鱢Ox[rm/h o.S]XX0`M*« +` kkPaw4Hh2F)y9˒\ڔ +yi2 ԭAH,إHۻ 7g]a#}TiT2@0M?|U?.AWBvMp@+8K egE3u5܎{:ejިqWgߤzg#*5*:|:z&L0X (D.8&6wV1TH} wRqZ젃R.TA]""=GSɎf1EDcVc*>P axRCvϰ̴u QR8De[WcK: /V)%06! 
5(p|PxS-6{9 #vZF8hĨ ؝fNy  G8u#(#ve-zX`P}MȸЃ7|YlpÍEL <q0`n]&Ӕ{A#njR )F!f ;-%!6RݕCj5AZq쑞uC݈a A<aeQ8NѲ6A) N$xbFG2l[}Blr*slH1$QK" |þxsDQ(GQ)[M&6@0ڦ^/F`J@:: %Aʀ),#!Bs$dN OA+UTղ큹NԖ94`;9fXs`C\r:X9BW3LEOKVyUԪx?@@Eg15*$A pk˴Ȅ1P*`HyTa&Ě==c:_8,{%) <Ga-H2[z]&a5fiH g̘1E`LGTg~:Ta WlؑUg㦛2a TK^6Ju6nnQ\3Q.&\M2iGSQU[q8nsjbsHFqv2-$P)U|9$ǰjMB``A5i㦛*bMRn џ~pV7cLH!$S[(Yx:Hzhz>QF?݅Ncu#7+]*qt%Ж ȒRـ/ ঃ<f0\cVoL1\<+CJkq=& Wq=A2[ilQ8E ϛJb1|n7"@5<rg=\kj]sk*WGAHٱFկVAGD9GaG h!JfͩnTFz͡ xM 9?mgCҤ4bjupś߮j+Đ PJF'{Q`ѪBJ0tx}D& sYюCNG!\c;*&bvDps-ҮZcl8<{e$/B`R})aYL[&5@j}hbQ`68HРG1 k,Iu` ڶBq6lVMUѨ9sRHJєX'AQX8p∄+{)0(dU&0kcp4~$-NvRmbۨIJ;Yj˺8 h'-NYu#rdN vt(MmŠ*PGVmD ozUbrjTۧ&o vq@͑Ӷme.i&lxOJ+XcC<#p kK'~&~z81'Iex$Hɶ@DKή~wva7כY'F+jm Â={դ3l v ̀+as3l8eL~ ?m rA\!ެ66(= POfw׵Ca%;8xcVG{}#C:MɱqMZlQ-Gwux$Ш{HasoǠS؅M\ۇ4d!t!K᭒4ljA*1[׳s,e{1`g!4}7epz~cCo>P%m(^LU\ 91N ČƀPΐvnwBP;c'0bJ%,2 UJ$S)#"kaELrE5C( -Zeh@4\]HaR)>0qdJ3JU5'yϯ޾:)V!e[ "7,<+Q  *۴S!'6xip5*|YZ6߷!og7`YX,;> Jw-PV4LC٧HVgQ͝4ĢR$v.u@tv-?K:jf(Ŏ}26bDz jgMiku@t 6L=XqZ Q0s%YK[(Tc; JI)0z(RuQ KSZB=(%E9ԖcP6-WP\'LLW">B1^IMYS>Cq5In'|!1泇,^\^J.>EkEQ];݆?@h߿S~kD}^"Zg|6|,LL01/Xr6YXDVtR@u9Zv*KC4,DgG=э'T*6Ih| 2hq ^IABy5*JAgS,gЫmZb#XV0- e9ZJ5mtXjIP;Em󚼙ďg8O#_ qz}u)Lӛu7^fݬޙ k*MbznNWŧ~*=L +]޽w^)[(*f ~??xx.f\B ^_Ǯq5=/>mKysݙW2'ƍ,?ͅ wBUeɻ<YZX?ƳW77x4!u|68 Eh̠=ǩR\$CӉKIU1^S3b`~r*Vm"+/̼ʮD5:\lyݛ|v\7 )TF3 *IL}%e k@,@nӄ&Ҧ8#@d q9N$aBpc R\JUS3FN32R)5I"VY *vnÁk1wҝ6.ByOV>oj>߂`~}2}w?w2GvU{Nk5?HS,6#>}~QOI@7>z>*4 "ƈ4<?xsb\V}^3n=zx7 ^9S$Y,^g=>}*ާa0.d V>?e-ZO_ hcή wa/[xw_ SʰĘ)SBcay޺! (BYr=W'&n|3"-D)i1Ŵ;L7dc x>hywz + ZX};0|!k̃W!T7HL1d)QA^fnPU1 TVVd]0>*עG1%Ffm;މ6}4cubs5҉L\tB ^Yk1$46U?;8Fve\N0Z)a0E$T FU$*u\Ao:HZţsWч)T|+>z(kbXyz;{rP_p Ru'lCDuWکp^WۡQ¼#PPHIƺнq65н u1>ʤf]kje[Lw0B7Q t>3w7bg NmKR@ԐTv 3RKZ ڮ^*.#<=tTs g˄9aJiDaZ ` `@Xq MiRےb .\~.>Ahćiq؉P!2MFi],s*:b)_a:(%SS/9|kՂ<ײ" hab .XJ(OQ*X.(4iUBYKpbl[ @5[ .cIaK,JBTiE`Oc,]Vt<krLe 1D,Hu a(d |BT,4C "t-Q`z$d ;%7`K!9`2qUMy5^OfwM#Zgk#FGrHzʧWGSuף-|dftNW߼-cFs?Wv\,7.z٭:%$pkUYYHOI^s%*l -Iȟ\DcdA}5nR3jX BD':G`vݢڐ?6)!4ԀwW;Ύc~*ƙmBћ?~,wӇ3Ee7=\VN B,mgm:J3Xy;~/x1fM|72w1O;Zϸ^=~~I&ޛ.53XSRNA]PNR{W$r>͜/h(W#ﲴ>_`ߒdM,>zɍ}j7vݖ+R60 zHCNɑ`C1CZZ|Ɓt'A9w@[!һ ";+Eպ'-Ih|?h+s q[m;S3. 
c&B9][(UAMFBj\' qE!piݱ9\wrqޒz:3 O{6/c˃D쉩d]<#&Ck>^?/i_07E1ݍuVmEdTttV*e=zPeYWKOԗ`|c0)eȮv08_Huμ P ]n$4YimN̆ u,tw|t#Yӆy?mڒ :݄6cr \o/_;V(9o+jo^6Ws0~* 9W%>u9bl8S"LBy 4]29U7Fv<8/@Tp>qsX'sHЇ#}hŎ==`gzMy@1NKdH;PC!2׫˴1U:\p]*]UB4zPC=+:\ 2|,0i0\Sa'^c*r*A|}eT'ǹ0O&H\u4 I &yEǼ\(öPŨv޼!'|]>g~^rmz|29Lk,g&ciWߌ3]Vo=/La3>XJ)@_`\2'F*Rg6:ь=Y J3{dN Yտ\ﲇ2z!1?W3m|_'s!įnlpO?˭k:7.fU5fJZWh ʴWi*M['% 7)@ +FxD(ΩTe)20d3C8oXFtbT6L:/ Z!Rz% LBBOȐZWzo$k+W߰RhQk۾p˕T2HQee>ǕjyշB^v9:؊SqC!4?m u ^K% $W]$G<4PFEP iV%X(ˀk PƮQzz Grw* *wU]Fz=7D 5+#,>Y{$ -&KA~^C8`8  29.n$^G{3-hJ,/v%*sqIޗ6/X>p4`*I`+v׊EC;9L2l&c'K{,{<~K䣛iu^PM<%t-IdGȅLhmqTe()(@R>jCt-5a_iJnւ1:]MzVsd+=gV{A[Zl%S1 j)k((:?߻x%1[h2^ualܲO}דv>2͙i&)XeB3[ǡ I;.d&@WRKy"DTB3IUˁ2cK-GP?[T>lĨV@Z=D|Jj%gRhhshb1qjg#ۭyt|=3w{KɹJ81â׬PYYݟZik)&d/1]n;]=mv !1 6=՚kq\6so3Z'߳(FuqUƕryxQ )o^o{ל,džB/^,#)rS^K C0Nɓ|;7SI>m5gmx;'{Ӌ6`sQJτ_}3ogGo?I5TzMkv{!>[ 9sr;6BE܋V Z]|i6!u'w f77Xў]uu;M-P8J:ЛI ݤ"Bf~2^=N~M/Mzcc.g.e6cmCYR *isJdYّ+#[$ 0gMU:)%p %a5 M)`kvs2ZsJhM?ZAWV _owxJI??^T4(Z6Yq =jφw;ߎ})W:׎6aǽՈlm?K%7U;%: eu uk#jۛ{l8 'kٲ¸鼛gATܕvFT9~zeR(qЦ9X~'Jt-@̤Q;yhlFzJ0qjI/`2†lbO^M)^~$7Ƹ & "!r ZP ȋF[ xM+gV&B$ 8-}yHX -J#I*vvUF:4s5”#m*+[Jr}E+8E1ˍ-"h2:q@@',7rQCHVREvJ١ P"IsBȬ D$U MGD9Rv-nJMJ(UOR# R Gg4<@"79w0J4 sJ]6 3[,"B ?)Zvh@%IQ6TKְDU+TX8y$f=s|ٻetw-_]_zjE!]b>|?]$QW{6X:?N8DOՁStU9_teª~]fe>UD`C'Л EǔлT LDFXj XϔGA .)@IW!{ԢZRtt ںB~խm]g"bZN~ Pm!i+Q ) =_Wd`xf %T[#zy43<Ywݠy?.r4YRyvTw< }s(tF\u+˃K+E<-(?yEx2IC1ԫC1ԫr u>} p]Lw#˄ң i MDCE#e( !~y[,sI@肆.m'ڢ88}`f,fOb:.֓7LG >}iʒAv|ئԜa@vaz=|?N_>М}>Hշ]`i^wQ ̓ l@L)"w1μ^EH1j#Vj%ǡ=yE황R<L @k! XdPo\ C6FO$*rzG{8hM( *;d,LFps7ź^1fx0/ܵȶ6Dv镛cFzT[[CNQ,i 殰8KUcgE_xKG_6ģ jK*Zw OW{$xVcKb5*B P*T/zVD zI޵>q#/xup?VRKJZSRVk/ $eGpuhc^) c r<:޼My@aU,ڲۆu*yIn7DѝkIrZ9zq;y|a^5 + UH.ZS:Dx&uC$7v}Ж>wЖ>MmgnGbBK:T}kL]3Lג쐰QH<ڻȮakWz]FFw^'c;Q Jw]w{k|2V)nW3+il= oo+}+4i !,NɃY,O$?J!N1C)"؆zۚhy)&9&|t~]Dvy iqGoG$\*)T;5j4qY۱&(ugǣ,  * A0v@r6(8˶CA(3jBxj;?Tgг;^AK*hᅩ(T4D`$hdJYh33ω* 9ց rVi(TYTy)*rJ&fC ȫC|(yۋ3HO H?2<&A"Eaq3{?7(jEԀjܴ0a62Ī % 'hlkA?;~5Uָ%j>v9{ ִ;6Y֞6,܂Å778tfik!$weګ7Spt+ :`_a{z46½:࠰d2D4Z#lZ=Ү~G6%?u|:*t5ڳKYHFvk-=O1D{mi(o M>"8[?hg,촺f!*Bŕz X6xI9Mf%=15&6kՔ1=~X F=a*gflj dxz7aQ"Ձwb"0fR6PkĦf20H"vpj2˧mḎ,SH1JridB~t5J6w5hA=VM50fVc_<\+I4?ކ )Av{ί*~~v0&7W<ܤfOZѕLJy\U~UfbWMoP"K 1a8S) )XOw?~ ;W_Z&\nEUFLqYH+sb}Z(rC߅BFk6O|Ebx~~\Na9]tE@&SJKsf0Y2ԅLT{)}Bp]VVK>G֯G&Yer)jtףj#L'Al4Z9AgU/ƃ-q\#Qy] dޖ)xg(rS:=ޟסW+1-HFr#8NPB{&*-TyeTՖ kk_,H}Vk]Ř:O\^831;jVY,M5W1TDjI\!$!1$HL-J.K:Àch$9mp$q:/B^Ot=L͓BExG~5xn#•iw%0g L;Rsڕ*-\ptը׋ςyE`l}h5{U_Jd ҙT~@ljo&!#ᴧ2nV*ўvS^||iLBo~` cOz-toZ~<:L!Ϛ~7ۉ~ ? *x;r즪FM5Gaobxa 1c_x(yJS?esRIsIԥv-ZJEmA+8(VqM-N0X)sOV{h13׈jSxކvg>Rgs#ЎT & +BV*$Vt3-"t +VbAҗ+J[iuHE#ʜIS=jh)GK]<ߎC.MWꋣ>ɾXitF:Ci]k1 ٤*kݫ{C*rۚKۙ&ݻxe U+qճ~f_il1ntՂ@Hݞh.jKlgFE 6/rEzJ6GЕBW` sHۜs_ީ\Ӗn$J[ʯu,A;>6 <uC/$n3!J9)NG5Vj; E^:p JdrɮFY!˺DY!rln_9AGtLGaR5$LV[(B$վc5qk~4~$aACn@&!>6DLEx-MCK4 /_U@rbM:$=GlNF[}pTZ{jPGߤe!V!qhq5%Z'ViTo*s+vj7p X]b-4 ^LD CԙԨ=hYUN.xp)[yYr-K CjVK*U υYDWO^G d,.ob0_;}v xvONsKrVҲ\K]!CѲ.A~J9/CwKDE+@@"7:kj&^̨Cjaᚉm  UmxX1RWzPS**M"ճ?ъi ]ܝQ:)>IQ}ȪMyg>uK:4+E6N{l>LK*8Ju"3uh+hS+r4zmr0pU8>I,0Q2G"AĕxxV e1@LIJnv0P*?(cd" !1:i-0AxtbA}.V*΋΋΋΋qA/*ZQHU(EYt)Y”<:NӁi:x|3@C_tRo蜔[l]%2<  KfJ =h+Cw4UVZWIJXT ZdL6+|g8drס(Q8H Kd"\Ԥ.K1xwg";BCW 9 +^ }dz״+ݽ  wAsb >is'vs}Mv ?_@#냵1{ WFBˏQIU&UJ. 
}!Ǫ[ܑ+Vr=e+A핯Zd;J MҞf j|9~f^Kⷋ WNUgf2O&،m^|N聹*xKznb@F{{Ffdz 0Ϫ9Oy6ownz燄6bGFbۣ7Bs!-n9%t㚗*дJMK4 WB 4s PIГ^PrUeQ.\XG:(bE%@K uȖ[ri32TFXZrZN+,Ss, qò $bcN^6Dp VgΉ<ޒO})Їd/-ITJnjXD2PF}glDN$Y(Rp-3r,Si)&NVߟv"aq(s |^]H?IӍM U99prgs2FGOVZᆬX,rU <"5YvewlfزG~ļ\.ll:/98vVonզ{j@X+0Oh8:Ό@]+F@l4ev0Q^/Ϣ!hCs|K`A}YJޘbOI2_ hi!l<:@8n÷}yJ)l$4 j#t@/ɕ\w~Zu|V#1V-!A i3#r)LγYoһ-u6uC ǜ͇&-+/'iH_9l9Da{ݟT#'0e<.Q۩~BOͣNͣNͣVѦ@#TGOkSOq,;595iɖI\9d]D}u[p^;kuȒRܚ^ Zp~qܕCiBa0S(3cm|V[-X0"SĊofGr=I-<_LoV'Cd>YȀΪAA<жzb_;U/A?ۨv;/qK"vsA%-hPXC|EW2րEigJ_s/W#Mkulq33&/>f{2kOU9`|_`;~H=~beZ0"lghd`@؎m uMuKvty=rӟ3E:0@23ZĢx@2Sy1(+bE0b~rD‘цCp7\w4Kx̆^eN[IۊOy$eJ|+ͤŰacYGpqv4\3}Q$i cβm23 x-Xu)qN?%y V{ґbdGə0zh_F!d챲b/.}h9#ҁY*'kknFŗ PXJur]q}XTPI߷1M )ҦJdqi| z:pJcNJ|;U_vV#23o߯J# ۼ8Ӷݣе,?!:;u}2hgY^7Egt2\>oƎ+v1Vz<=e@?_LR3qmDK"4(ei_I FَpRhq_P)BM.\ xLy,an-c~ `\gfFh3't{{ǽ:|P|ޣ|b9DŽz5jG-+[Divz5u_P2G0efg|NUnp!J@H[2YHѮ j Ǯ^\Io*iNhqc?bmv~ir+j`Ϟ:Տp'};5dok[,ٛgO'IIvv:9.Un/Zp{ Qڝ.4O*l݊])ڈ^}Ru S }LrIqCH(O{<ݳ^d#1{ǚ x.8#mW12Mpug+bJwCLz(倹]]3>\[o@7icr (qu2%"'oAQY##+VṣNAqRu_ ڷÎǀ _GQ z#\?q\ 'RH"._p71'w ʹJ~i7YM='RBLxJTݟ>D|z~۩>Os`"#Q S8D6NE5ȉC+6PyI#qkΩF'! p2TeT0H!ɥ ( n"pU +AI IYKtFE%ʚ! @(>Y3)օ&IB6{ pU|^Z#-# E9(VfL <9@Ǒd5ށ!eJ磋MSֲؕGU9R0IUZiϼE `).C5CZp p"F![Eˀ3U]1S#Ӂ@IK 6RqJi#A|U4iZFl\Ī}AvL5׆#)x^ӑcHin,\^<7yv?_NF6C⣟W|-dh|5t~edƉ7tOE$wOտs=-s,Qٹ:lguJNו:(e4Joj]j;p8xajaTj94jh)-^H1bno)@54U'ůϪx䯠AnC*$*I]4:1Qu 0n+;<ӫŤ$CmpЛV~-y$JZ*YTxc8Lk1 9?"V.0#c_ӯꯄrl9O_Ȟ2ucdSi.$!кq^DYsY+mgOy:7Ŝ4xi/ a΍E9燶i3U6F1n Ys&)- $8}1 M9VCɕ*F x!Tr żT\`[h1M6uApl.?kզT4:TI/RM1=ǀW367m**4FkK{Xh5N]ޭDY uJM9(Dv8  h{R R,,F.QR2$Q1ȭhؒ9m"$`&&q::2:FX ` ] e~:X4a脢?8 tù3;Iͬ^Uqm`e7dcV&Asj5+F;lH4J[^WT+TTa`85'Z4Dr")Y 9!ժl%U7u*`Ml}!%AЌɻJo4m]$FTu%h D4E4AC5iA$(L BڂvA hhM~̃$b=YĐH.Q 0P\! *p1̈́¹1M6j<}9jN;x3$AwuWYsKMQgxZ: )NyEepub2Zŀ"kMREXVpE9ZDWiwtU,(oDCj7ag=_TfeЯT_iQD-^w A=bYΚ@K~Ȯ!{XzYE=1j}#ZZmxd%dY:"P{'h>jFMɫW/fd_ kʙxڌ|VdϮll:Q6!v:w=.[Nc׷^O.u5]t93+~SN:_Ogc򋹢9e!B_ &z=sNiXpln;s>NJօ$!_6)#^'j=2ϨݚbPEtv{N;tt綵[s_vkCBrm$SdA?6Wb/@m&;&¬mjkMl \eRji$tkHhxu3 Q5xC5rs+ߢzJox>C٢{AE5v=V3"뢨'?E;:4\I @^Nh7OYRhV{Ϋpt n_mb/vpO6a WLf!:XGE;o,:LP۶d:7w.xH稴*̆*QG72YW~C N]n1~qk~i+>Vhh" hsS5J-L Ƙ´d7Io}MI^=X܌5(c}cr6K缡8W>:%t) ƽ,!UU% ''0s:Z𽍯XRNlic%n_$sz%u u#o1{qW;T)W=-S q+*j/Z ?)ȂlM:1ؖ]Gg=om Km\۹^F2mtˈ PR-9ep9z۷==E̥BQ\Ly9e̕ /)7͇LN.ᬹsc'κ/>ыA߽ENWlx1{t؎N;Ckvg!./.CHmlU !fgWEUQB3M$@p^K,0EVv!m+pi];7݄PyJV۷~zjM'[ EFWV'\*tZ<,#IB/;qIyӭ,03n26YyiZr"E/)YU,N 6l#82XRZm_~r)趋ɓ/TXXbJ?b%anEkI5*U¨GW?~2* kԐ%G`<{7G d{eK׍?%V*Vy y n/]/>(40^&x"5:F˝#<,X#ݦi#kۣh$xxt̛_LxoL#T8%(dwEʝs.[ToE*t w BېOd|ܒCƔ<b"q_wY[*PwgH S&j|J]OI3_J&0E;8\e'N̯pBҚkvxvx*^5TO\?[*u|yg*>չ0;ń#r?dS8o}=Q\ JO9iIfB3 /EpVK&4LH8FZ-]1^SoM5]oFg.cpNO ^IxKA{Yq~S @4 4mM@%(qA쉖BYM{QL`WyğogՍvvb )~כ)= eˀptRN8 'NSVY  Bl$ ,]Z5ggLftHv1AѱK6ϷA1?]v]-DHNk,،LLɔ ,y‡,)Q58樶 P[Vk5Xn!fM-0&AØ @ITO3[*loé=Co<`$&$$X @JofC8=).-N +o6DK$Zk%|Q=5C౾ܠaVJ)@u|t᷎bخdɽWѫ?f'Zl2V^1xL8f7]}y7O@OgY?Z޳J^;HNK%] l9=aYSj{-͐/|fͩzmkGnLw4nGԳS{nntkc|S4kNjlEϿ6WE'W0[NqY*[  ra ~hhx|0ٍh4B!b.tU{S59i罋ͤhVo}*9s93G e)ս3p7-Q|FRcdG)MXHE} XxP{Lk7Ž $:coDfCW-4:)&!SZ(tzT;pj+]߻GH:KEʞ ; d9K (XoAi {C?Im`pO/Y7o:$r,-V|ṋO1gTf:gnY˨ZeG5l]ώ^ؘipoV}@Unwt:񷳣 oٍm(whڄZY}'\{رB4zuk#߿w.|S{=qFH#7#W?6Mzlrqj#i,W&hjcXfcbcġZaq>yw|X4.tŷ^=DP_#_=k|]Ĉ,CMpBTet1gz)8B"1Hhzq(EUHMݣ/]iZGZ~tPˠXɻf֡~" "/OcT=Jua=t"yZƭl`uroĘrwq˿4#We"Μ@h5ROc buptDqL3&P?sTaSo-m<+Ix;u h9eIMwhmSA⼻MJ[I9n]AH:e$zF+%syŊ{1(PZo= "yXsRްejqOijUȎȌ}'͏J$y 0r9J2;|@ePe,?SᚯG_lg"S- :r3@oE{T}Y'f1ؾ !BSŸ]#0YXʯ`~Y1k$ײGKDB椠eeδoqS_Y|)O?|-E]|A*L?rAQO!v/)N``N$C` ȣi}҆H>z2OqbOm3EjU6ۢ҄}r5vdo^J&%!dH%CUeLNJ,U`4s췎XjqFHJ,hӳEӵW =  4DO M})3Y"75m,6*(b%f<(!F j 6gdmV],%*"0&Zaƥ4h P%28X:4 X0W`k <=`)G BPX{AaglNRgUՆMR H.J##ކL<5w,%6p!PN]).Y~\ya>k<ԶҠs$a,ySnof@bQ]Ymh͏W}BB߿{BVwKrAiş~{^HQ? 
3{QOoWg+Wo\c=w x#hXͣWDs B'3K~xz>].ק wqu;_nc ˋm t'#=q6zЬf]5fߌ9/y<E~ ,䋇n y Wͽ"9{tc`B DE <`ixhhH8^dyjAioift@!T>Hl5$~O"Oz; 9AVT&db~ztqj9m5hQjvo*|0u0h-ADV - ]7- YH0;]eΰmFG({5% ]۩Nݫ8\RF؈L*ԚYs*ǎjBuHi Ɛ* bAy7+D;bPrpBұ`8! -'V@H@#bky3.䭄>?rڕ:}Myqvd,P,r֗/SI}/CW$XY@.7RrÇskQ bUu3SjYS,{JѾn<57_}΃NW78P˱(&D9pX̑0<Ҫj+cلZ9i\D;S;B"eipwm͍J_T/Yɱv*k +\iCR]1ȡ!f%׍ƭk]r/T@\} =p)NG(]z6l"ȠDG!~"kbcCy9hhԞ$KTXﳵixRC!m u\xwPgзɌ^kF-OW0NАRn!=u/uj-m")>-:H@^k4#@9g?7)Hw rQ]zFw_bV\I*\צ A\ñ/xO]>ݻh+6OHN *&7-i\}TKж9)Dm*tc\e.fxg&~#otO1 S.'|Ś/zᅤJvI-V2\W^=2 '6_AZuNZh2u.`Jkc-+}M?D7t7 `*(dV^v_TⓛnD[&OזDI6kQitX^ ca#_"\x((*QpYB@l Q/i7oYdMdZZS5]3VDw#ywqa>FkYbb8#|?uddSD9R \)rا%p^J*E3q.Q3OW嘅)ւP9.hDŽ+*Qм(¢؋znԱ U9RkO|J@\@2rB*Dق #V*iZ`{Hċ15q9f 1yvSItEÔŸg1 .hF!&`*0u%ibUOQJ%/410|1vWc pe 8`D3 ӾC5 Z ΐrBX,ػ{׸qD=urdbDc]saX#_ u*` JTJPd7G%eEbMܼ搨x6O[$-JoR\'{yO'5KZ7~`1V @2ȏ̥ZUX~b6?`cI;$aÌtp03,p/|ӰcCղkߔ={C@ XU 0[ 8tR)OG1(XcIrT/%!d) 2!2 1%`0)U6pcVhfAf0Zb&5#5QR j2!9zؒ&zԂc(=w.=1gp^ b!G1tqe[fG]ћ=,*~qZ,N+0̿h_~mxG0'Cj\FCl 6qJE>kgW+d/*\طd!D)V=Gm7P[Ou0P8ZIbLb˺(f+=٤h5NʍVB/t#;U?WN8X"֬FtS)օ翸 q)E*b r(/Xb!%2%V a‹gupVFS^U{ U0!4&湻ч5} {NGZӅOC{~q^! 3zF E+kPq\T_cUnѨtYz> cCX7c]~o[w[U n6y CA1 D0 $p1pPQ` 蟱?ߕHߢOKintnGHAU\aud,"FYA,DxR}%+ _Hx,wU$ 4Hu(wAUnqW#ыG:>c)JVGrY($d+'KMk9j~9"9/7TΧ1D<E/v;OQZ)FIIi"XS%RHu 5( GfزY#w05\{aPh7ʲV2|t>`[ O0ǩ$: ᵦ0H%5- #_{gXs*0+;ĝЫcT% U+4`TUeT-ag%{Z@IوQu%5Xivla\1yO0ԕi&`[&'n@j00c)Ɩ0Da)dfc zQ\b `=3 .h!: 0r3H@>cyF0S'Zdqk+ѱ.\-Ls N@-ƀ9< Xm Mdeg=lUB1! imR,>"(hVJYa$uA`2E,.-?,&a)Y)xPb(s-|` aQr,ía  /uJ8p"li*Y)l,6)( 1T;CYM!P }юpF۰=O-S z)2{|ʁJ r>Pe<;bTPks=HI )[&,ǖu &E:X=aZ( dY-2+fIH̰u0& n43klA lBa99BkO=s::{q78{/= Z1`z\=byx۞Ƃ 4M>8ֵ"Z^3+Q펱i*wVu{\r}Лےvl\ +*_~•ҊSw5hSj0pM.ʑF:ia ]]tɣd%#0I\#)TΙʐo41#e-6i|M*7U{TpfVڍ!Mݠ|1h;d`crddA2r+6^:Ykv -CëqstDnJq IsӇ8JÅҌwrĚ2{Mc!eFTik3޵})==mXh}>שZ<À0 }$#hE $S1+B3 `Y!|J\<~MZPV;Ι{'Cp*8c]ai6jV""Ws^O;G|slk^NѻA|Iaw p)#Onj}wT#C#!9;r<^jUeSHzԊa2-|xl9پANK ;Pofl F;l9ݞ tʩfK~Db͖6,䕛hMVFHA侣w; 4BBߵwmJnmX+7,y7 k!x\ RL'MvSQo-,ѻa!Dl=w6>w trĻDYM4?:ѻa!D)FdAz0ሶI @vRj':?ث1V̉>LHW2i^XUb l x%۫NۖXRC+=[6e`+#@f ~D/z%T*3y 5'*%O9zB_D5jXs4Ũ>Z;>s'U}n\9!+޻`v2GՠVo #Pq'= M#<^J0!iiŵP/0Am#(cxcf[9U}hme.WհaH@TB\䇰~NAi,eA$Y&adB0ieUP΄$)QhdO7rfO?t`m7k}-g4:_A)B>׈ic`$Ձ(OcV\eIM5K:6)fWq~<'s"O -& }GsAds6vP$h;~AoxkՒj;ݪ8q _N Gwa(VTj#,\bf7ӉY&I7wv%fkD)$2?,7^ qf~_.΁:r?0bhv7llMG#~rm Wz/t=K@zlnO¿>BX(Uscs 4L'`!FB+ZOI)k,đ$ xy0#+0(vo e-#Y6+;Cj(Ƃ4})rYijR[rVҿ[W7^=HxHQ` cE-rp*s:2an-"=^kp]qʴRzjy|qÒrCZǖR9kɋ5IW4nVM wF/#k7V]U#W[Ja߈^0 67SN4zQi1W/mvM"l6%X;Il>LAtDcD+ -%D)FB9Oj~M1Ǎ[I/Y)_!O96r{ `bɌf"i|l,߷(͌8RKbݭ =}d,V)"1ѣtE_>-1]mJ<*V7fXǞ T#\zO@"AkUPVZ]N21>:͍ * JRXh.PxTZEA4Ve &X#Y^"-ZZ*RU4PU 4 {V vR / {4BK)U}p2՞f/=pO/f'u T-չӣÇK~7 Kx\~X5fd*G1W[1 0gx N q6o$X.$jtcOEK" 0j>z$#?>LVX]{0~$|՞hF5$Xq#*p1dl,~+:NaƘcAjz2Auzb4LSͮDx$Nq}Rcki: ;utliء`kS}!mp%Cj9/.b{RYmfaU=MNXn -&l7JLS޼bKNu/7.7ںy2csYrr,ӸH+$KߎPjw ! ɖʋ զxJ&[(P!9Wc:8r,y;iy޴*.M$]ODl#Cusmk*6N6Z d{y_JXi7ወ=hؘ[[a-NIf"h׵-d':W[W.=9m;!gKΒ#t ,Vj΍Πe&Ό骾_mr7;qpx*eϣ[RpYЏ"S1byAשּׁeΤ S˺| &9UE(zէ m=GAB|(>uYy.^D}vgOZ¤%-;WG uXf#&&SHg* $JSI,V`5)ª5JU:HI { FHVAdغz фyQ̭STe4;F4` 8zcuV"c|<$K8Ɓ(A.3섲>I(L0~0 |G}?Nm’KJ9w7(1Aa&W<±Q\M]>/%h"&A0UJ"Ԏ!Ĕ;30CHH`B/&IWQKшn:5?3v\|e,5Q$ |GK.bZ5ޞ! 
+b??;* RFcLS;V`D ÅjO '4MNh' hbTG݇[!ʯIfN=̧eB=w"t%Q2ϭ9\HYMTV3WV)::ʽ,,W$w6/n'{9{ t`L>hx IeC[023C/>WTwsYeД#.s*HD`):B)5WRwzw06~ Q6'qߧ,DLn) QjKje}!JY L'JJ Za"Z¥D/1^8!bnPR@koءTUBgV',QY[*U@#I2R1gciO=c)!g(^(0Y+&/ '+VF-efkD@B A3,gXx[ Xk# qSu0)l/-,׷~]b)9:s `M4r2w3c^s}* ]KZQAZQ!,P7~)T0^[ä,o/**HeOX.A\9 i YZLE!WJtT{✴1EJĒEG[{񬊭'MYh3s3.&$eԎ"P9 ZJK,Š 1µÕu6(A զE2Imٍ!#;7w`-Ņ[Duؙ:Don_7(+aHp|,sf.&D(gﲂ΋)vR>K o .e:u9vu}o&qkyoUm-zɏlK0ƟM&9Ÿ'sx)·*.^hC0 6dx3lC\9Gq&ĻG&I {\{s/~igy'Zݝ=L;6iwо,0.7/Y,wcАw,kbPoc۳ACYw.]߷}<79` >XA!]Py~?{^?"yp;/&M9v1yxw=]?SʫVjnF%=\jc ,蝹 gr'['zgkS36MGy'e=gFOA,=눞# <},zz>Vm!&o867VO>`u('򏶷,念[n-bõo=HӡwiVm>.z;bpX;CYt pU B/ eت;#$A>XR*QdF+^7c/ƊV3X`JGP6Zj˄JB89.]Lq l:~<Xٓ4xk>ӻߒgIJf '2oBRVy6 /"6XSs0V_%@JTXx*Q0YbFb,PAʐb-v 79Yp28'Zq7,[`ݒo55N  &Z)z&#Vt2_W^8Rsp 0nQDkՎN;).0_.&1Ћ7 RA m{= 'DL=0 $듴m 6:RMs?^j` ~jKj*r3+iTGavw;zL6`QxN̴L>w ף-{>-ln.͆_i#{Gω(}Jءbϗa"1c6g~wAZz`xVD=m,нj?DSsw\ 8 ²p,˨-V|e&bIQ{wZQ gE *& z[WG_ 83~o%V؍}O0{|~nz, wl$89IO K8I9i.6R jԔ3,!&?d)6]J6*/C\Ž&>"NNF\a (Y d`rzEV,gxwO)6d,;<%Ƥ oY3^N`h0ԂI88kKH2 Hv1/V Zb#>^vL+{owPgNs!^>x}NC*AZFXvYbk?LONr- ^]- Ƙ\s&h+Yno?dK^%Iv~Yď_J~ eޑgzp).bVbo6_d7}8~L9x[nv%Ɨ>Rݿ?)YCU;- sF ki^pC%Gm:UiM+HvV𘺻8 t;[8vBh NK"c=˲D7X(Nj8z&WEZ/ʝ[r9 owN=L~c\sI/=gyԀK"pێӕFЊb#acwm&!_Sa !%ZmG΂Zq.peGyr8пd]]9j&:~0.@Yyqg%~׼ d79}wb^U˻z0WDSN* Lc6KOTa l/VX# ߋlZ)Ptl +ClȧV9/L]c-%k/\M&n ǣs܁22gSz8kxUŧ=SzضxxuQ8JXqZp#XЇpCbU=. E((wH\*!4"j5$Lan=D@Vg[MiOl]M焓-{zy1o>o/f:cvL;$߅?]lw1' =qҜbbS`5/L0Aq@dM{mE^HMD#|K|8Fh^mթY*KG0 "ev!d` i o--ӔډX 2@T3COhkq@i"PH؝U p i~Q|<,daYʀʉ*Xrp= ~FK Voeߍb)j2f//S{:swSѮG..IU*&ͿG}7WkPV]꫟ "rDzY?#'=ШdFw770| ܘr_^pǻ n[Rc~;3ZyG` TocOtN5S3t3{Y%A ~ ,VњH@UJ ++<ษS$STNd  N_j)sNbAQ[IB(Bq̙R"HC  &Tbgd -0 m/wu xz5ݠ59s5(+zo|80ac n+KZ+UEHӕ0k,kVWQ=s߁M,kFYofΊ/ 0i+~5Q֝^ӿ'}F=:΂ hQts+/Gv2 u9=J [PPW.ip֌(+`nOdjLJXh$:#øJE'ל<i6yNhE88$"{=%4Be1&X*9 sD ]H+Xr@ Q 5^q/ -xa!ds%5)x(A/Ű1\XǼR0 3lpQ4A #]rZI 7Oof7r`{4wC/Q-=~Y@I3w:4+A견GQ{RΟ0j05ua1}J?9M  M7J@^z̰jA;ux_i8/]A@2N'땵Ĺ.9wpԜ(OPU%L~f`=m{:#o|OT>x5,`uۦq}<ـJM6STQVR4;8 Wx %&,bf_XqHqxi;|V>+Eb,  ]E[LG#k ΁N1.VvwphıbpA?^K0jDca#DV nM_ r^pb0%a:@N L, rco0 +[ E¯ywЕP7/fK# qtw,oّcne~NODPJt 86.\<<^YGl#_1 ̉vJbrZ!-1^+F56XVqBC[Fd Z` 8p2D,,HIU>V`U҂: H8̢nXuTݘG6VsLii|%hgE#ՌkE|Jvispi,$W XKꡟ)ri*i.}|ij)-Kc+;Ʋc(xr&䈴Ft,K0 j~:MF)(8yQm(-D?EU1-3a laJ$鈡:FhUK`w֨p27 qԛ TZS[-^BJV/C!-Rm`r2;V5SpTEv T+"䌁B/hSi7ލ[?i!`Rd`،%ee-f=p j{a[9 KmpU MP]P~h xxfxW6I|˅xaΒ+ ^lӠZ/9MU5I"!l[ fzg<(fw]UɂN01as~ `俅_a[e3VZ΃5.:M^',ƔφwAr=WLZ_{s3Ի\d;ʖ[TQ00˱we |b~lEe?QcMSIfX hcyn.3fEHt`#o׸{cϳ~ PbF!H7-֬۹1ҧneTE hI*.uӗhg2ə`/XJal7ve Q/-5`݋FfƱ/Adq h%07/?Ű9^ij?+Ep><#0q?;)NՉ@ H񼤨諟mlqUsCUg2>8w5+n6ܔd_s"_][s+,= )/C=uWM53,ndY!ޭR$xbf8J% C2G̟qo's$L,1}5f֥^9F37R^Gx[ܴ6vHsտHcޟK3g [̃Vl?; x=t횶#0ۛSrwG‰zx_N,z*(e/Ɉ^?ݹwTbF5]ם"ʇKP! J@Ri '&hKHdݫv2tBOث ث;Wߖ+O`h~14<kWxx<n JqڲE|>Ɇ@>[C =F&v~M|.xfpdZyX p)F5[% NM|>ZBZ!RHA։g$O@|]_1-xċbv}uqG1(go^E^] *& Y %T 6 |/dI}]Vp0 ΰJ`dZ*9vs$!# '%yБX鑤L`**pcCrx +iD*SJOSa Pa{JvEJeeA )ϯZ3]Hm F.KnCiJaLz}UD˻@[Pх^<=0Pzo+e KM^zEˉp " ˋOoV~W[< 7b:ۂz(o>. 
LTtf)ETKy1;^J,ic (T05!IvZ:r]{fpg,i#G=E⤇)b~#E tw#E=xїWKfFxGMmĮ6L=VwXqVQxcJz$C7Oq& ieswZeOwnhLF) [n裓:t;hq\@taݜ~!q'q>ʾn,:tp7!Ԕ꒹JbY1•Z8ˉ(h)8J>^jNZvi"JO*M q+s ]k~PJr4-5K8uqhdQ`QF[PV;n19mp=t;~uWzCjX O)A 9_gp nu#}MXWnY62td-($v;4 kݻen XWnG۔\#puII(EJҀ'UZ)]:DiNc% 9RFm2!YqeeR aqQ!kʔ6RSyƎqS)lį_||;6Vꗋ:o9T/pex=Ym/'G~:~tnOFo5]J^UCU3 IUL%e0Md2^1 M5\PY -!a*0`md%LR+Iԯ7VH>bc)i৴DOYxcLoBr=ڦooOAh=LjyAp,{|, ivEi\pc[qA&TA&oQ@BN)jZ'9;h ^s"XM%hcLI!9NuhKR];@*D <IS"D8\$c*& tUlʱ}RjvAQ xÞvAD∋ `G#5\_ؐh}\+0) c%|%':#UI!9-;$&zjqb($câ4VB`L+i *ʒ[+om 'UB JRd~!dck)8d7ƸM*'gcB&5ɮ㎗L1r H'aq۩1QfJVTbQ7oFJ S3N5J=9Ĥ%B)ZbRc c`mLBGQYh3S0l]a_$i].~-I%W*0;6`YLtw2J%_m-Zdǒbް/@D^KwqF֜ J u `OtWŕ)bu꫘Z,Ց%Hl9lX6Qu;dhi13`p )d,)>FeVSpTJ~Eͤ`UZXyElBr, L/V\(MH`lY8L`%{kI#@սDT9N_oJ-a!a*N͸(Rvj)|[CKURp`!Z K8 4p%_vÑ-n6563}W7nN&qjuUx$"T#`s]Iy{mi ## k!&̰7eil[^O=/~[bX̅!C.WvP%Hd4*F;ɪTi?l^^y?`2ش/zdZ =Ltyy3syt q@֗oh{^M"IM{"9b&IU620FU t!#E\Eq)ۈֳ/*F~S)/ʳ05>?ˮk5]{La_W/V# K=>EܸP,JI4rI s2+"l8-u㒆อXf͞c9"grb&b9FҒ5mfqZj}򳙝"cD8Uhe7ἱkc$jaLs[Δ#$$=` HbU o'Fvh$A \t1k󮛡iӛ#MRL/M").Q$YȪ0YӄNi"}$MjU/G&^Hs9}>;}F)!,\"Tϫ]!wUͨV2W ~FE)Xp- 6͎,;_O~^0"uq0 ?uG7 yLAzSi&k1E yꯞ_et%Vj-,vPȨg:v%QuwnݭC[dBπbPQC JTU) KeZJ6/̙bπblrq'㵁RJXysEf!jI -Cͨ} =ԢWmu1T鹥QJREF[ZSg2v05dysK|pFKVGΒCj-A{d]RgȠ=z(o@d6vyxjI1,C=x0^?3uV/$iʊ9=)% hND!PjhtmD@KS r+VwJ{m[t}@!FF:[@0 Т}bۻI֦ *Ũ5DJ m'>5\[/h}zЋ6Ǵf-\$8QF*,-Ol̴ӥF:rG"iY.^0gvaY$)^LT"uiezc$dhXaA͆]QCNjk(?< ryxsIMMSZ06 gCYi6t4\dXR&r:Oql{/t S/ٮ_s?u\M gR:C|w_|]8V^X .,[f8W Rŀ_bL8+N- l7g]JFP߬K n:i]F[]X6ĝ:mln֫X{3۷\J3K"=O~}~^uYM7%<]N>DȺ:-?kMphT-w_1*촻u7ry~ܾ\i'U/ӗ|IȧY{+zb7`Zz{V`ο ;#7Ï5fcsPVpc@hFu:׿4tZ1J>6篣GW sf8M/O7IӍg #L.[\ B, |F_4o?%m> 5wab.7=Z#n޽=ob{6]cwg]yfzf6s4K/U6)Nj-j!^:z@+y ALm۶Z|lV$E*#l\l Qrj"1`S DLk hW)A-Idzž20hZљ(3OtJ-Q/}\!wdPK]}j->s2SXMgsd|KvZ'(_j&<Ẑ疡*gs Q> wsmznI7T[yne=K<$`I Wrh-Y0 phЙР)pT p2rOl{U$0GH-B"GH-BmC0upPX N58t(8eaEwl%"HNq%2ozɈ :3烅YREP9^(#, A4`4(1ͳHS5F l 损Qq{Ej)ً`z=FX^ұcu4=R9JG}r`볪85jӁ+ OuߑIR}~ԕKn<.[ÙB7G {j=m8 j{82wOUL,E5,e i܌yjLֹkչY6%CbrdOjF(JѮpYKW%(d&X2C8zE7Y@j # ZjCoКV `-[>Q #55 ߿%RR-~H[Dz$*h'l#qFQhԩgD,PlW(oJZy,0OACP<pԊjLO=acڼF-$jarN4I/cD5-@y%jXf5T2 AFp6[L(Umeᨍ~KÅsޓӍywEsCq뜔ԒX+68:n©=k6}sB ~?Ƌn/SUST[ގ2[[Ҙ:; X?c ư0BVc8xo'l fZ$F2ٴ"'.tծP#EѵT @t3B,i2gPpLE[M(" 0O˯PlCܝ_AAZNAʯ*:į.SW>%Ut|J&Y֓Sz*CsWf3W>G3ԫůt]鹥1ge9.;'wu4A@{hJRzנ)Iia'\"=pMtj|"mqҷH)ڨ 5JQZyP"lFhZy!I:ԒU"TDhIw?&u*ӗuOAQ;2;Lxz0R3V` 8}ԚRUĵJUbj򬇚n 9 |j%ɳjݻ|l,atiR E Vde2 gQ)DQaY-1peZƩC 9sKF !-!"E:ZeV60zQ] BR\2b| ֶ"!PGH-BGH-A͈N<38ujp`pLaL03g hG>m) N#iT KҠXD[FNLdFQ<8@{o(iXaENrQe$Gg? =_/s0J:Q>^վߙ%ɏ^pHDhkі ^)⌉V[zњ 0zD uS끨G {FԪ:uzC."b( J爦[4jLN>DE˯SylX[~.BK\OޭNDjҢvl7ry~Ĺ} Kve 0,j!J1i~.Ξٕ m; _}PQ8+qro%DocF _ga Fm7Qh~TstfNh c-wݨND8:WәN+UtuC؁ʭ,ohYt<%gMjX Bc Aa4yϨfQD0'a{Fv.?]6{=nzGܼ{z6v,lbGں ?S+:8< .n.람~n$?cʊYOpiP2S҈dH܄,3@DRd%3mRD{9m*%Aŝ*Yp/ \Mf]9Mo<XR?|m{Ϸ7 C\ 5=БkkIʌ$%8˴m9sahu$iIj4p:}q;i]&1pn%ISʋ64I=#%ӄy8R*d_\U[+?Ogeh?'l~(恙\l&S"Ԝy̻vz w; j)&KP?2gP QT H>3eӗM݈,ުg?ُz֒RWF 6ˣ&nz >td6/y:@K3گYח_5m%@+JՖM~n0/[Yq='=!) 
_"03$xk]84~w^'qn+&yMz?=>Jo FQ[UJ'T|wÇ=`;X]v$JܘK>*?W~'<'y5ڝ_M +' jg4%-'0 y ܌JN`«99; Az;鵜!]Qg#jqz\KGsOPκX  R~/ bDP i2'x eb`-7`C2bR[z> ӒIz lBϩy$j %ahGJ*GǏ9%KƄR繈p)|])Ϊ,)UBmDA+)$nF U>2ЂY]8)HxZJK Jd Iኢ0&4dZ$jS;},8y4(V׍ BN~`రV>غ~21Yk)T$;XbF@ڮy@а.ԘDC-Pc<ԗP+P}ypz9<<|H;,t2kc 2#tu¡@ $Ad*`kf85\piLjMtB_4"ۤd OjaR".9/>x./xI/~3ˏ>\]r}w~@k6~:'ǻQx'Z +M,g8WIWim*Vv>憖ڼKZ2Wדl$iod4EEqQ-%^Kwi]GG)C} >S"Kfe` k9>Մ\B.GB"$^yzf䬅$|u?,#51?<}uEǜWW^5gbf#^o@.㾁uBs&Kfǟ0R|[V"Vnjh@l%R)T[c+}lEVxͽ-8W/EѢyRۋF(v"1,Vo0À@Hb5Պi1خ&|) myZpArI(–eA#?]dl¾v8`q@li G9]Bv­2j+ViTqpkAMnH;֊[  -KsM ɜiHMaIRL[EV%~0 phKW V@ekPVӌ@Td p\gYaY.h'>HO]2d% l.md"eTw!.q% Tl%<+Gs:mXgS:غ#&> 7joh@.\l=V#׆a&Gr7fsGx0'w|OOǏ6H77﮾z4x7~|u3ӝ=[߯_lB-'=CGdٹ昤r;Gȯg6BzIJmԭ郳f0cG֊6|W] 2m$􊺶W;8px 2m31 9Vh@Ŷ8 Sm*N7E5XӽԂWbꩱ VBLq,z_`́Y::mʒVل̙qλ/ص8|r j]GChH wɞɡ@V#ƭxƢnܪ>`%7fV PUPB--MXUubr{='=&/arNUJn?zŒ~&>pzc"3H4gWsz1xR/ckxvߗ"~z )gٴ-q gF<ުtLi!1툪$}L5(΢xnc-nSQ_bL# d@J}3#EqYtt OZާYwTތsnp;Rۇ3|:M&Wɫ_y9&~5MDŞџAD^$n‘ Gd|}09(྿# 9mnhD%V:dC`$:_X+MTBj5E8k` T"eV)$,gLpF"Ɋp$5EfPQR8J 4lиv-V>*Mg Z ZiN_L1/my SWha&Ke if:U $' yg?z.?~5 0^Io`g+`6rĥ jK2Fk`„IBK\YGkdEe30FiD:@")l+.{PdolX |щFBuƝDԘ[5㹖\hZ$Z\Dyư0MH&J1|ҧ#'G;_cB&e`jVP1z=Loxa YˋcgFr7յ%;? U/~oHLs¼{(G m\>}{p}Ty$Ehecw?q.YOJm5zm= Mip6 7i="(JФحfN" p%ݴ(i,Hpۉ砾\ZkN\jA8u#jL' 16 ;QԗPknX_b 10%KuɐЇ/O|k%RYKBMi,i΂wJ"JFUaQOpj=-yL61:EHj"WH@ZyA^؜vjx* , Riɪ${=ݸlM''W4J/^t\tl8z+Վql au\s]B \\>Qrɗ.ۯ=MQyUzWLو]9Ac溚W%իFߕ8 vS͇=|ѪQXq}h}H>0Z-e0#%|h#:#d^u ̒>Fv\92RM*"͹kfXs@m5ݣwwZL} 6E.L(KDfma/v@S0Pg,7Ɠv4p!\ۚ[0TyRs64LuVM,с|5dMLmyY|gi1)ʘ B( Y͓yZ|,wv %&"ɭJ%6{m77#ZXsno|InF)hK7FB-Y'A} RQ>j~K*E,  t'j ̰`O |b Ya"x1U[fֺX>MJ UnCLѶΪi!U.+d(87IJڡC:=EұT5DԢu~``#a2(ݵ "#Y> ԰H,fEgz#_iKl8MVq50/qN8/pekq o(0RRWEV=}|jFxT^B1V*u6&i6Z,#ل S#j {:{7p[k׫̓~9@G qlplM؝}Qr7td񭳃GԃҖUY;<0M3r8OmscZ|zh8 rfK1Rw7e0_ՃnVoț.^|B"A]Gg48XZGy͙OżGl8l?2kE^M.XRh e0\/|!,Lџշru36'fH^1\|^lntٍjE3{6ݴ{Bn| R󞞳J[FƱAg$g_uQTJ$Al=wn閏${a5vOKߺno~Ĩ b BJ(nkDx'*@oP=mZdGKfN_"Qi>0Rampvjpvj."݈ZEPg?`me7f<>j.[&0+ J+GLs"ڄZԧ&3QY-lPND;}"*+LrX˩fbm3sFS/DŚPv\LVުߠ֖u.ֹXb.+;9ϭX'9ꊬ#Ç&ȴ V|;$z'ST< Vff>̤# ݝrD A|j/5~}/u `qY7_C(ٌ٥IL`d¢7>)E4xY'A(ݠ213dV1ylHwhv׌0䍒si4KB:$7LdnO׌EŚI4f75>F-Հ4~wz4rB-b+\SC緦ϭDX >Ae@$El0NLhCdҖɣRsu)Z ~!HúD M1 EI>kFxlrcF^@iQyWw}ԕTk8gE»x .wg1|!6p!麎yרli!BPe%>QEDmx$F ~tS1*+9-$=PkƧ>w"DeBgeʤD&5k" 6@r\ FJ2騥 ~;9)s'H Zd*#3c H`r/{BeN)T+崍rGұ6Fhttd\:hMީZD+`J1ϬBLk3Q&}"?}5oĄ1L6$7VZ18 I)M%>3,OsYUK2Mr%Zo0_H#Hà#A2)bЂ H| 4 $Ӵc4@m`S+f#vEMHŴȎ4H;"QfR0F]A.4!+,*ԝі!XP3_كQB|M7=P75`YtUZk8q~ gOaߠV)QE%MaYmEpQ V Ϡ#MAd e&19J[-\*:cj߀k[0^݂ ZPnsYY 3ȌY*>q,gR*3w\L"Xnd,X)pM}b쏇/zVk%k Z?m~pVjdO/~RMyŎ*]7[튙-)3-ic֎.' yXf8".KZG;FEd;$N*2rIJF DθBVε\O-&1Tv׽>Ԍ7$>{VUytZV(}?oo uA^dtฌIjЯJmJ}]vr_bjwEoCm9oi$* 9&f;z(> $lS&#w'@:5h:~itFhdrvBKA `Sm޻O5S5ͧP+ >%*BA/Bg!2[BK)˰vM˰vM4˰vm^֮V=֭]vjr"nඡS|Af[7p纁+LbǼ+L옱P=b^ \Yy]Pš ھH#mm'qќM=Wic%`"XH\ 0a`\2,O"X=Uv&ZXoCmbRFSLe96J􇧝w NᥭeX;dj (5l.Sy(<|Gܦ>>j?}*^uS6g#q"$G'!InCH|-BY4,5,|BE$\ϖtU9GA^?5fi9{AJl=xF?`*Gns^); ޽[ Ddɐ8rs˟ᑘ Ir1lBt>VFA>ThG˲E&-(FF)n7PrtDS8jC-*Lޠ[z$ +K/pݹ]{\O~i(0@RN +k/+|Q>3›}{63cg&q)&چ0 "'IݽL֌=MkFx7MǗ19nVlR{ 3ɫɫ5hSs:|V:-PSK9'XICBu $py &?־,-lHN:,$F%D=KRL,vZp٨EdCrArs0erFCĢ5d l4/f`&b/f,zb͎i#+uᢲR^T@K5%OAg$ )~#9'Q\ŤLHik}8; [ڎo^2b`+yZ |ݱ@X6j&D"b2nĶMՊZj( Vfb6V-⹢MZz,_[,PT,x^9 j 2c%`mpy",ЌMnh7OORxG_5cju*?&E;vNCtCLtAɷ'?Kvǩljx~.Bσ,a<|:f#ΓwG$W,_^%eGTC7N7sK1ʝV_;'\}.;[=D]/Zy#oV'iŻ|u3EnΨDPq͏?33yDٺqؚN7 ZW>%cg߾\k)!'fH^1\|^ՖxIߞ(-{6]u#]UEuo8EHv"ىD2&J1~'I(FDɅ;e*OsIKlǸv+G~ٿV'i2 &j,"ІZ1XD~AT6Ք=㹢DzRUI5>y9ՄZ1mAE8mpfB}p&MޝvggMל_B}scB,4VB/ӨvPoWsy8VM) e]7݆Z3K[QE4Cb%ר9Mwo{ӻ4V\&oy=Qr9VMMXJQ`MEEiDԂ3s6"8@]͑kF9@<2V b9>ьxC џ6?~,|xrxjjc;EzCYij @ ّA svJ YH$Wt bzY]o7Wrp> )nmcg? 
var/home/core/zuul-output/logs/kubelet.log
Feb 01 07:23:20 crc systemd[1]: Starting Kubernetes Kubelet...
Feb 01 07:23:20 crc restorecon[4583]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as
customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Feb 01 07:23:20 crc 
restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 01 
07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 01 07:23:20 crc 
restorecon[4583]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 01 07:23:20 crc 
restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 
Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 
crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 
Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 01 
07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 
07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 
01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:20 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 
Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 
01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 
07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 01 07:23:21 crc restorecon[4583]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 01 07:23:21 crc restorecon[4583]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Feb 01 07:23:21 crc kubenswrapper[4650]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Feb 01 07:23:21 crc kubenswrapper[4650]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Feb 01 07:23:21 crc kubenswrapper[4650]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. 
See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Feb 01 07:23:21 crc kubenswrapper[4650]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Feb 01 07:23:21 crc kubenswrapper[4650]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Feb 01 07:23:21 crc kubenswrapper[4650]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.684745 4650 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.693352 4650 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.693502 4650 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.693602 4650 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.693693 4650 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.693783 4650 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.693894 4650 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.693998 4650 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.694600 4650 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.694711 4650 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
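Editor's note: the feature_gate.go:330 warnings here, and the further batches of them later in this startup sequence, show the kubelet applying an externally supplied gate list against the set of gates this kubelet build actually recognizes: unknown names are warned about and skipped, while known GA or deprecated gates are still applied (the feature_gate.go:351/:353 entries). The following is a minimal, self-contained sketch of that merge behaviour only; the gate names and the knownGates table are illustrative placeholders, not the kubelet's real tables or code.

    package main

    import "fmt"

    // applyGates mirrors, in simplified form, the behaviour visible in the log:
    // unrecognized gates produce a warning and are dropped, known gates override
    // their built-in defaults.
    func applyGates(known map[string]bool, requested map[string]bool) map[string]bool {
        resolved := make(map[string]bool, len(known))
        for name, def := range known {
            resolved[name] = def // start from the built-in defaults
        }
        for name, enabled := range requested {
            if _, ok := known[name]; !ok {
                fmt.Printf("W unrecognized feature gate: %s\n", name)
                continue
            }
            resolved[name] = enabled
        }
        return resolved
    }

    func main() {
        // Illustrative defaults only; not the kubelet's actual gate table.
        known := map[string]bool{"CloudDualStackNodeIPs": true, "KMSv1": false}
        requested := map[string]bool{"KMSv1": true, "GatewayAPI": true}
        fmt.Println(applyGates(known, requested))
    }

Run against the example input, this prints one "unrecognized feature gate: GatewayAPI" warning and a resolved map, which is the same shape of outcome the feature_gate.go:386 summary lines below record for the real kubelet.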
Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.694807 4650 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.694944 4650 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695172 4650 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695230 4650 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695235 4650 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695241 4650 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695247 4650 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695252 4650 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695258 4650 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695263 4650 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695270 4650 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695276 4650 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695284 4650 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695290 4650 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695295 4650 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695305 4650 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695309 4650 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695313 4650 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695317 4650 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695321 4650 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695326 4650 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695329 4650 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695334 4650 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695337 4650 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695341 4650 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695346 4650 feature_gate.go:330] unrecognized feature gate: Example Feb 01 
07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695351 4650 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695356 4650 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695360 4650 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695364 4650 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695369 4650 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695373 4650 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695376 4650 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695381 4650 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695386 4650 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695394 4650 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695405 4650 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695414 4650 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695421 4650 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695425 4650 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695429 4650 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695434 4650 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695437 4650 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695450 4650 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695455 4650 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695461 4650 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695465 4650 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695470 4650 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695474 4650 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695478 4650 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695485 4650 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. 
It will be removed in a future release. Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695491 4650 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695496 4650 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695501 4650 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695506 4650 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695510 4650 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695520 4650 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695527 4650 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695531 4650 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695536 4650 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695539 4650 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.695543 4650 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695738 4650 flags.go:64] FLAG: --address="0.0.0.0" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695757 4650 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695773 4650 flags.go:64] FLAG: --anonymous-auth="true" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695782 4650 flags.go:64] FLAG: --application-metrics-count-limit="100" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695792 4650 flags.go:64] FLAG: --authentication-token-webhook="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695798 4650 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695806 4650 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695814 4650 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695819 4650 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695824 4650 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695830 4650 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695835 4650 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695840 4650 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695844 4650 flags.go:64] FLAG: --cgroup-root="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695849 4650 flags.go:64] FLAG: --cgroups-per-qos="true" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695855 4650 flags.go:64] FLAG: --client-ca-file="" 
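Editor's note: the flags.go:64 "FLAG: --name=\"value\"" entries above, which continue through the rest of this startup dump, are simply a listing of every registered command-line flag with its effective value at startup. The same kind of listing can be produced with the standard library flag package; this is a generic sketch under that assumption, not the kubelet's own code, and the two example flags stand in for the kubelet's much larger set.

    package main

    import (
        "flag"
        "fmt"
    )

    func main() {
        // Two example flags standing in for the kubelet's full flag set.
        addr := flag.String("address", "0.0.0.0", "bind address")
        maxPods := flag.Int("max-pods", 110, "maximum number of pods")
        flag.Parse()
        _ = addr
        _ = maxPods

        // Print every registered flag and its current value, one per line,
        // in the same "FLAG: --name=value" shape seen in the log.
        flag.VisitAll(func(f *flag.Flag) {
            fmt.Printf("FLAG: --%s=%q\n", f.Name, f.Value.String())
        })
    }

Defaults and explicitly set values are printed alike, which is why the dump in this log includes both the values carried in /etc/kubernetes/kubelet.conf and untouched defaults such as --containerd.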
Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695859 4650 flags.go:64] FLAG: --cloud-config="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695863 4650 flags.go:64] FLAG: --cloud-provider="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695868 4650 flags.go:64] FLAG: --cluster-dns="[]" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695902 4650 flags.go:64] FLAG: --cluster-domain="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695907 4650 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695912 4650 flags.go:64] FLAG: --config-dir="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695916 4650 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695922 4650 flags.go:64] FLAG: --container-log-max-files="5" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695929 4650 flags.go:64] FLAG: --container-log-max-size="10Mi" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695934 4650 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695939 4650 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695944 4650 flags.go:64] FLAG: --containerd-namespace="k8s.io" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695981 4650 flags.go:64] FLAG: --contention-profiling="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695985 4650 flags.go:64] FLAG: --cpu-cfs-quota="true" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695990 4650 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.695995 4650 flags.go:64] FLAG: --cpu-manager-policy="none" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696006 4650 flags.go:64] FLAG: --cpu-manager-policy-options="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696014 4650 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696039 4650 flags.go:64] FLAG: --enable-controller-attach-detach="true" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696044 4650 flags.go:64] FLAG: --enable-debugging-handlers="true" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696049 4650 flags.go:64] FLAG: --enable-load-reader="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696054 4650 flags.go:64] FLAG: --enable-server="true" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696059 4650 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696070 4650 flags.go:64] FLAG: --event-burst="100" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696076 4650 flags.go:64] FLAG: --event-qps="50" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696081 4650 flags.go:64] FLAG: --event-storage-age-limit="default=0" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696087 4650 flags.go:64] FLAG: --event-storage-event-limit="default=0" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696092 4650 flags.go:64] FLAG: --eviction-hard="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696099 4650 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696103 4650 flags.go:64] FLAG: --eviction-minimum-reclaim="" Feb 01 
07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696108 4650 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696113 4650 flags.go:64] FLAG: --eviction-soft="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696118 4650 flags.go:64] FLAG: --eviction-soft-grace-period="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696123 4650 flags.go:64] FLAG: --exit-on-lock-contention="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696128 4650 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696132 4650 flags.go:64] FLAG: --experimental-mounter-path="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696136 4650 flags.go:64] FLAG: --fail-cgroupv1="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696141 4650 flags.go:64] FLAG: --fail-swap-on="true" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696145 4650 flags.go:64] FLAG: --feature-gates="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696151 4650 flags.go:64] FLAG: --file-check-frequency="20s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696156 4650 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696161 4650 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696165 4650 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696170 4650 flags.go:64] FLAG: --healthz-port="10248" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696174 4650 flags.go:64] FLAG: --help="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696179 4650 flags.go:64] FLAG: --hostname-override="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696183 4650 flags.go:64] FLAG: --housekeeping-interval="10s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696188 4650 flags.go:64] FLAG: --http-check-frequency="20s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696192 4650 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696196 4650 flags.go:64] FLAG: --image-credential-provider-config="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696201 4650 flags.go:64] FLAG: --image-gc-high-threshold="85" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696205 4650 flags.go:64] FLAG: --image-gc-low-threshold="80" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696210 4650 flags.go:64] FLAG: --image-service-endpoint="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696215 4650 flags.go:64] FLAG: --kernel-memcg-notification="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696220 4650 flags.go:64] FLAG: --kube-api-burst="100" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696224 4650 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696232 4650 flags.go:64] FLAG: --kube-api-qps="50" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696236 4650 flags.go:64] FLAG: --kube-reserved="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696241 4650 flags.go:64] FLAG: --kube-reserved-cgroup="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696246 4650 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Feb 01 07:23:21 crc 
kubenswrapper[4650]: I0201 07:23:21.696250 4650 flags.go:64] FLAG: --kubelet-cgroups="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696255 4650 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696259 4650 flags.go:64] FLAG: --lock-file="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696264 4650 flags.go:64] FLAG: --log-cadvisor-usage="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696268 4650 flags.go:64] FLAG: --log-flush-frequency="5s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696273 4650 flags.go:64] FLAG: --log-json-info-buffer-size="0" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696282 4650 flags.go:64] FLAG: --log-json-split-stream="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696287 4650 flags.go:64] FLAG: --log-text-info-buffer-size="0" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696291 4650 flags.go:64] FLAG: --log-text-split-stream="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696296 4650 flags.go:64] FLAG: --logging-format="text" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696300 4650 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696305 4650 flags.go:64] FLAG: --make-iptables-util-chains="true" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696310 4650 flags.go:64] FLAG: --manifest-url="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696315 4650 flags.go:64] FLAG: --manifest-url-header="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696323 4650 flags.go:64] FLAG: --max-housekeeping-interval="15s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696328 4650 flags.go:64] FLAG: --max-open-files="1000000" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696335 4650 flags.go:64] FLAG: --max-pods="110" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696339 4650 flags.go:64] FLAG: --maximum-dead-containers="-1" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696345 4650 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696350 4650 flags.go:64] FLAG: --memory-manager-policy="None" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696356 4650 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696361 4650 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696366 4650 flags.go:64] FLAG: --node-ip="192.168.126.11" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696372 4650 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696394 4650 flags.go:64] FLAG: --node-status-max-images="50" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696401 4650 flags.go:64] FLAG: --node-status-update-frequency="10s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696406 4650 flags.go:64] FLAG: --oom-score-adj="-999" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696411 4650 flags.go:64] FLAG: --pod-cidr="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696417 4650 flags.go:64] FLAG: 
--pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696428 4650 flags.go:64] FLAG: --pod-manifest-path="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696433 4650 flags.go:64] FLAG: --pod-max-pids="-1" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696438 4650 flags.go:64] FLAG: --pods-per-core="0" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696442 4650 flags.go:64] FLAG: --port="10250" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696447 4650 flags.go:64] FLAG: --protect-kernel-defaults="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696451 4650 flags.go:64] FLAG: --provider-id="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696456 4650 flags.go:64] FLAG: --qos-reserved="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696460 4650 flags.go:64] FLAG: --read-only-port="10255" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696465 4650 flags.go:64] FLAG: --register-node="true" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696469 4650 flags.go:64] FLAG: --register-schedulable="true" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696473 4650 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696484 4650 flags.go:64] FLAG: --registry-burst="10" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696489 4650 flags.go:64] FLAG: --registry-qps="5" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696493 4650 flags.go:64] FLAG: --reserved-cpus="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696497 4650 flags.go:64] FLAG: --reserved-memory="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696504 4650 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696509 4650 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696514 4650 flags.go:64] FLAG: --rotate-certificates="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696519 4650 flags.go:64] FLAG: --rotate-server-certificates="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696523 4650 flags.go:64] FLAG: --runonce="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696528 4650 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696533 4650 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696538 4650 flags.go:64] FLAG: --seccomp-default="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696542 4650 flags.go:64] FLAG: --serialize-image-pulls="true" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696546 4650 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696551 4650 flags.go:64] FLAG: --storage-driver-db="cadvisor" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696555 4650 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696560 4650 flags.go:64] FLAG: --storage-driver-password="root" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696565 4650 flags.go:64] FLAG: --storage-driver-secure="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 
07:23:21.696571 4650 flags.go:64] FLAG: --storage-driver-table="stats" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696575 4650 flags.go:64] FLAG: --storage-driver-user="root" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696580 4650 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696587 4650 flags.go:64] FLAG: --sync-frequency="1m0s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696592 4650 flags.go:64] FLAG: --system-cgroups="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696597 4650 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696606 4650 flags.go:64] FLAG: --system-reserved-cgroup="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696610 4650 flags.go:64] FLAG: --tls-cert-file="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696615 4650 flags.go:64] FLAG: --tls-cipher-suites="[]" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696622 4650 flags.go:64] FLAG: --tls-min-version="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696627 4650 flags.go:64] FLAG: --tls-private-key-file="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696632 4650 flags.go:64] FLAG: --topology-manager-policy="none" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696637 4650 flags.go:64] FLAG: --topology-manager-policy-options="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696642 4650 flags.go:64] FLAG: --topology-manager-scope="container" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696647 4650 flags.go:64] FLAG: --v="2" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696655 4650 flags.go:64] FLAG: --version="false" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696663 4650 flags.go:64] FLAG: --vmodule="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696669 4650 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.696674 4650 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696849 4650 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696854 4650 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696858 4650 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696862 4650 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696866 4650 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696870 4650 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696874 4650 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696878 4650 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696882 4650 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696885 4650 feature_gate.go:330] unrecognized feature 
gate: AlibabaPlatform Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696890 4650 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696894 4650 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696899 4650 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696903 4650 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696908 4650 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696913 4650 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696918 4650 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696923 4650 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696927 4650 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696932 4650 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696936 4650 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696940 4650 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696946 4650 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696951 4650 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696956 4650 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696960 4650 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696964 4650 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696969 4650 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696973 4650 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696977 4650 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696981 4650 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696985 4650 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696989 4650 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696993 4650 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.696996 4650 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697000 4650 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697005 4650 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697009 4650 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697013 4650 feature_gate.go:330] unrecognized feature gate: Example Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697019 4650 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697037 4650 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697041 4650 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697046 4650 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697049 4650 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697053 4650 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697057 4650 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697060 4650 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697064 4650 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697067 4650 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697071 4650 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697074 4650 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697078 4650 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 01 07:23:21 crc kubenswrapper[4650]: 
W0201 07:23:21.697081 4650 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697085 4650 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697088 4650 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697092 4650 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697096 4650 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697100 4650 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697105 4650 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697125 4650 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697129 4650 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697132 4650 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697136 4650 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697140 4650 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697143 4650 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697146 4650 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697150 4650 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697154 4650 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697157 4650 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697161 4650 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.697164 4650 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.697188 4650 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.706802 4650 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.706829 4650 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706897 4650 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 01 
07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706909 4650 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706913 4650 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706918 4650 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706922 4650 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706926 4650 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706930 4650 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706934 4650 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706938 4650 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706942 4650 feature_gate.go:330] unrecognized feature gate: Example Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706946 4650 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706950 4650 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706953 4650 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706957 4650 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706963 4650 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706967 4650 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706971 4650 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706975 4650 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706980 4650 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706984 4650 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706988 4650 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706991 4650 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706995 4650 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.706999 4650 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707005 4650 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707010 4650 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707017 4650 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707034 4650 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707039 4650 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707043 4650 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707047 4650 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707051 4650 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707055 4650 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707059 4650 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707064 4650 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707067 4650 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707071 4650 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707076 4650 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707080 4650 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707083 4650 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707087 4650 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707091 4650 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707095 4650 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707099 4650 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707103 4650 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707107 4650 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707111 4650 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707115 4650 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707119 4650 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707123 4650 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707127 4650 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707131 4650 feature_gate.go:330] unrecognized feature gate: 
MinimumKubeletVersion Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707135 4650 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707139 4650 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707143 4650 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707148 4650 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707152 4650 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707156 4650 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707161 4650 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707166 4650 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707170 4650 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707175 4650 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707179 4650 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707185 4650 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707191 4650 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707197 4650 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707202 4650 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707206 4650 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707211 4650 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707215 4650 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707221 4650 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.707230 4650 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707365 4650 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707376 4650 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707381 4650 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707387 4650 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707392 4650 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707396 4650 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707401 4650 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707406 4650 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707410 4650 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707414 4650 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707419 4650 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707423 4650 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707427 4650 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707431 4650 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707434 4650 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707438 4650 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707442 4650 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707447 4650 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707451 4650 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707455 4650 feature_gate.go:330] unrecognized feature gate: Example Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707458 4650 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707462 4650 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707466 4650 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707470 4650 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707474 4650 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707477 4650 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707481 4650 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707485 4650 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707489 4650 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707493 4650 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707496 4650 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707501 4650 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707505 4650 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707509 4650 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707513 4650 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707517 4650 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707522 4650 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707527 4650 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707531 4650 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707535 4650 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707539 4650 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707543 4650 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707548 4650 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707552 4650 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707556 4650 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707560 4650 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707564 4650 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707568 4650 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707572 4650 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707576 4650 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707580 4650 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707584 4650 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707587 4650 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707591 4650 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707595 4650 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707599 4650 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707602 4650 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707608 4650 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707618 4650 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707632 4650 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707636 4650 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707640 4650 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707644 4650 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707650 4650 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707655 4650 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707660 4650 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707664 4650 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707669 4650 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707673 4650 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707677 4650 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.707683 4650 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.707690 4650 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.707933 4650 server.go:940] "Client rotation is on, will bootstrap in background" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.712197 4650 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.712286 4650 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.713974 4650 server.go:997] "Starting client certificate rotation" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.713999 4650 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.715018 4650 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-21 06:02:15.537127976 +0000 UTC Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.715198 4650 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.743923 4650 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Feb 01 07:23:21 crc kubenswrapper[4650]: E0201 07:23:21.751255 4650 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.751923 4650 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.769292 4650 log.go:25] "Validated CRI v1 runtime API" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.807556 4650 log.go:25] "Validated CRI v1 image API" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.810002 4650 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.815390 4650 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-02-01-07-18-28-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.815449 4650 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}] Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.840292 4650 manager.go:217] Machine: {Timestamp:2026-02-01 07:23:21.83721923 +0000 UTC m=+0.560317575 CPUVendorID:AuthenticAMD NumCores:8 NumPhysicalCores:1 NumSockets:8 CpuFrequency:2800000 MemoryCapacity:25199480832 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:df837a87-3594-4d79-9122-32f12f83a642 BootID:9f5f39b4-df24-4fd8-bc0a-6661ddd50241 Filesystems:[{Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:12599742464 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 
Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:2519945216 Type:vfs Inodes:615221 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:3076108 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:12599738368 Type:vfs Inodes:3076108 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:5039898624 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:429496729600 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:8d:82:4d Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:8d:82:4d Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:cf:c0:fe Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:cf:b8:ca Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:a1:ef:ce Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:67:0e:e9 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:fe:02:90:c6:5c:21 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:32:c4:e7:c9:ce:92 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:25199480832 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.840665 4650 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. 
Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.840889 4650 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.843508 4650 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.843897 4650 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.843953 4650 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.844352 4650 topology_manager.go:138] "Creating topology manager with none policy" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.844377 4650 container_manager_linux.go:303] "Creating device plugin manager" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.844986 4650 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.845067 4650 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.845364 4650 state_mem.go:36] "Initialized new in-memory state store" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.845512 4650 server.go:1245] "Using root directory" path="/var/lib/kubelet" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.850319 4650 kubelet.go:418] "Attempting to sync node with API server" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.850367 4650 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" 
Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.850428 4650 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.850459 4650 kubelet.go:324] "Adding apiserver pod source" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.850492 4650 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.854879 4650 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.856447 4650 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:21 crc kubenswrapper[4650]: E0201 07:23:21.856587 4650 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.856693 4650 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:21 crc kubenswrapper[4650]: E0201 07:23:21.856897 4650 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.857812 4650 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". 
Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.859967 4650 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.861855 4650 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.861902 4650 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.861916 4650 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.861930 4650 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.861951 4650 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.861966 4650 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.861981 4650 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.862003 4650 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.862020 4650 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.862066 4650 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.862084 4650 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.862098 4650 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.865648 4650 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.866533 4650 server.go:1280] "Started kubelet" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.867719 4650 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.868103 4650 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.868514 4650 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.868546 4650 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.868993 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.869077 4650 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.869634 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 07:30:32.030462731 +0000 UTC Feb 01 07:23:21 crc kubenswrapper[4650]: E0201 07:23:21.869760 4650 kubelet_node_status.go:503] "Error getting 
the current node from lister" err="node \"crc\" not found" Feb 01 07:23:21 crc systemd[1]: Started Kubernetes Kubelet. Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.869936 4650 volume_manager.go:287] "The desired_state_of_world populator starts" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.869956 4650 volume_manager.go:289] "Starting Kubelet Volume Manager" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.870064 4650 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Feb 01 07:23:21 crc kubenswrapper[4650]: E0201 07:23:21.870281 4650 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="200ms" Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.871754 4650 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:21 crc kubenswrapper[4650]: E0201 07:23:21.871903 4650 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.873712 4650 server.go:460] "Adding debug handlers to kubelet server" Feb 01 07:23:21 crc kubenswrapper[4650]: E0201 07:23:21.878746 4650 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.51:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.18900e8141584a39 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-01 07:23:21.866480185 +0000 UTC m=+0.589578500,LastTimestamp:2026-02-01 07:23:21.866480185 +0000 UTC m=+0.589578500,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.880787 4650 factory.go:55] Registering systemd factory Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.880828 4650 factory.go:221] Registration of the systemd container factory successfully Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.881710 4650 factory.go:153] Registering CRI-O factory Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.881743 4650 factory.go:221] Registration of the crio container factory successfully Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.881828 4650 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.881863 4650 factory.go:103] Registering Raw factory Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.881895 4650 
manager.go:1196] Started watching for new ooms in manager Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.883007 4650 manager.go:319] Starting recovery of all containers Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.893674 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.893837 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.893877 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.893906 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.893945 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.893976 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894019 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894082 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894126 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894155 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894184 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894222 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894253 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894297 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894324 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894359 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894391 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894494 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894540 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894569 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894605 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894635 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894662 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894706 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894738 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894774 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894812 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894853 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894885 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894924 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894953 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.894987 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895128 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895162 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895204 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895235 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895319 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895351 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895380 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895419 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895451 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895484 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895514 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895544 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895585 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895615 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895645 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895682 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895714 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895755 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895785 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895856 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895913 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895959 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.895992 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896072 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896114 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896143 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896181 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896209 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896236 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896275 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896299 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896333 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896359 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896451 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896490 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896531 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896563 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896604 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896634 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896671 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896700 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896727 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896766 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896795 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896829 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896855 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896881 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896914 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896939 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.896978 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897003 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897058 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897097 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897124 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897163 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897192 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" 
volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897220 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897256 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897284 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897335 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897366 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897393 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897429 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897457 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897486 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897522 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897548 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" 
volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897591 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897623 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897825 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897868 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.897910 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.898430 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.898489 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899156 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899229 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899277 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899311 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899344 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899369 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899401 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899424 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899452 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899473 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899499 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899525 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899546 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899570 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899591 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" 
volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899619 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899638 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899658 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899695 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899747 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899769 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899795 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899818 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899952 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.899978 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.900011 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.900072 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.900108 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.900142 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.900167 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.900317 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.900338 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.900358 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.901303 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.901335 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.901374 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.901396 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" 
volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.901417 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909193 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909227 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909246 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909261 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909279 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909303 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909322 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909337 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909352 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909370 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" 
volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909388 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909405 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909421 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909441 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909455 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909474 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909488 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909502 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909522 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909547 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909565 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909583 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909601 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909619 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909635 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909650 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909664 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909683 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909702 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909719 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909734 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909750 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909765 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909780 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909795 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909821 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909839 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909856 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909871 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909895 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909919 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909942 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909959 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" 
volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.909979 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.910000 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.910018 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.910058 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.910077 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.910098 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.910115 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.910136 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.910157 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.910173 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.910195 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.910210 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.910232 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.912171 4650 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.912206 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.912224 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.912240 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.912261 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.912277 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.912292 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.912308 4650 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.912321 4650 
reconstruct.go:97] "Volume reconstruction finished" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.912331 4650 reconciler.go:26] "Reconciler: start to sync state" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.919559 4650 manager.go:324] Recovery completed Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.932073 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.933523 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.933761 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.933776 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.934420 4650 cpu_manager.go:225] "Starting CPU manager" policy="none" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.934441 4650 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.934463 4650 state_mem.go:36] "Initialized new in-memory state store" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.949644 4650 policy_none.go:49] "None policy: Start" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.950557 4650 memory_manager.go:170] "Starting memorymanager" policy="None" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.950583 4650 state_mem.go:35] "Initializing new in-memory state store" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.955893 4650 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.963882 4650 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.963935 4650 status_manager.go:217] "Starting to sync pod status with apiserver" Feb 01 07:23:21 crc kubenswrapper[4650]: I0201 07:23:21.963963 4650 kubelet.go:2335] "Starting kubelet main sync loop" Feb 01 07:23:21 crc kubenswrapper[4650]: E0201 07:23:21.964044 4650 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Feb 01 07:23:21 crc kubenswrapper[4650]: W0201 07:23:21.964809 4650 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:21 crc kubenswrapper[4650]: E0201 07:23:21.964861 4650 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Feb 01 07:23:21 crc kubenswrapper[4650]: E0201 07:23:21.969870 4650 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.018706 4650 manager.go:334] "Starting Device Plugin manager" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.018764 4650 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.018780 4650 server.go:79] "Starting device plugin registration server" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.019286 4650 eviction_manager.go:189] "Eviction manager: starting control loop" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.019312 4650 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.019784 4650 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.019879 4650 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.019891 4650 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Feb 01 07:23:22 crc kubenswrapper[4650]: E0201 07:23:22.029469 4650 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.064841 4650 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.065000 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.067957 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.068010 4650 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.068052 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.068556 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.068900 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.068962 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.069848 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.069877 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.069892 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:22 crc kubenswrapper[4650]: E0201 07:23:22.071013 4650 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="400ms" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.071327 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.071368 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.071386 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.071499 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.071665 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.071718 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.072453 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.072495 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.072510 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.072618 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.072704 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.072766 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.072777 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.072872 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.072944 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.073977 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.074008 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.074035 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.074594 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.074616 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.074802 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.074815 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.074686 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.074955 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.075516 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.075540 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.075550 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.075953 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.075995 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.076005 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.076060 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.076012 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.076906 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.077092 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.077107 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.114518 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.114575 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.114613 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.114647 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" 
(UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.114715 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.114811 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.114850 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.114878 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.114900 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.119865 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.121875 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.121931 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.121951 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.121987 4650 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 01 07:23:22 crc kubenswrapper[4650]: E0201 07:23:22.122664 4650 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.51:6443: connect: connection refused" node="crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216191 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216290 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216376 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216430 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216465 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216494 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216490 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216497 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216562 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216463 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216592 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216620 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216632 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216556 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216659 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216672 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216681 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216699 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216717 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216732 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216751 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 01 07:23:22 
crc kubenswrapper[4650]: I0201 07:23:22.216768 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216819 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.216884 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.318087 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.318186 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.318231 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.318270 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.318289 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.318354 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.318424 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.318473 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.318303 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.318568 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.318595 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.318743 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.323424 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.324962 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.325014 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.325061 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.325099 4650 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 01 07:23:22 crc kubenswrapper[4650]: E0201 07:23:22.325725 4650 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.51:6443: connect: connection refused" node="crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.415885 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.445519 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: W0201 07:23:22.468901 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-318577ca155544fdf2a6c5b9a4cb8b4015a151025ed81aff1d607009fd9aff5b WatchSource:0}: Error finding container 318577ca155544fdf2a6c5b9a4cb8b4015a151025ed81aff1d607009fd9aff5b: Status 404 returned error can't find the container with id 318577ca155544fdf2a6c5b9a4cb8b4015a151025ed81aff1d607009fd9aff5b Feb 01 07:23:22 crc kubenswrapper[4650]: E0201 07:23:22.471922 4650 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="800ms" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.473617 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.506729 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: W0201 07:23:22.510673 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-3791e7b3d116f0b41ba9d4da9c7f8dae29ec3fe19d6448bc9ea92c9174a9b479 WatchSource:0}: Error finding container 3791e7b3d116f0b41ba9d4da9c7f8dae29ec3fe19d6448bc9ea92c9174a9b479: Status 404 returned error can't find the container with id 3791e7b3d116f0b41ba9d4da9c7f8dae29ec3fe19d6448bc9ea92c9174a9b479 Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.517154 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 01 07:23:22 crc kubenswrapper[4650]: W0201 07:23:22.537060 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-f3310a8efe4a30f0b3050bde084a6187a7476713008bf2a47052798218c21b3b WatchSource:0}: Error finding container f3310a8efe4a30f0b3050bde084a6187a7476713008bf2a47052798218c21b3b: Status 404 returned error can't find the container with id f3310a8efe4a30f0b3050bde084a6187a7476713008bf2a47052798218c21b3b Feb 01 07:23:22 crc kubenswrapper[4650]: W0201 07:23:22.542110 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-ca713cd64a669e67095d7a9f5a39715a28b45a5c57dcf6462219d7a0f54e849d WatchSource:0}: Error finding container ca713cd64a669e67095d7a9f5a39715a28b45a5c57dcf6462219d7a0f54e849d: Status 404 returned error can't find the container with id ca713cd64a669e67095d7a9f5a39715a28b45a5c57dcf6462219d7a0f54e849d Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.726752 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.728653 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.728682 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.728690 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.728710 4650 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 01 07:23:22 crc kubenswrapper[4650]: E0201 07:23:22.728986 4650 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.51:6443: connect: connection refused" node="crc" Feb 01 07:23:22 crc kubenswrapper[4650]: W0201 07:23:22.776276 4650 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:22 crc kubenswrapper[4650]: E0201 07:23:22.776363 4650 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.869445 4650 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.870422 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 16:59:52.390445456 +0000 UTC Feb 01 07:23:22 crc 
kubenswrapper[4650]: I0201 07:23:22.970625 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"75e669137fd919a28e6b2c013aca40fbd74e88b9b9159ea35d2b35f4aba3ddff"} Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.972072 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"318577ca155544fdf2a6c5b9a4cb8b4015a151025ed81aff1d607009fd9aff5b"} Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.973555 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"ca713cd64a669e67095d7a9f5a39715a28b45a5c57dcf6462219d7a0f54e849d"} Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.975210 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"f3310a8efe4a30f0b3050bde084a6187a7476713008bf2a47052798218c21b3b"} Feb 01 07:23:22 crc kubenswrapper[4650]: I0201 07:23:22.976806 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3791e7b3d116f0b41ba9d4da9c7f8dae29ec3fe19d6448bc9ea92c9174a9b479"} Feb 01 07:23:23 crc kubenswrapper[4650]: W0201 07:23:23.049991 4650 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:23 crc kubenswrapper[4650]: E0201 07:23:23.050135 4650 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Feb 01 07:23:23 crc kubenswrapper[4650]: E0201 07:23:23.272962 4650 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="1.6s" Feb 01 07:23:23 crc kubenswrapper[4650]: W0201 07:23:23.321755 4650 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:23 crc kubenswrapper[4650]: E0201 07:23:23.322319 4650 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Feb 01 07:23:23 crc kubenswrapper[4650]: W0201 07:23:23.384090 4650 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:23 crc kubenswrapper[4650]: E0201 07:23:23.384185 4650 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.529866 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.531418 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.531449 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.531459 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.531484 4650 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 01 07:23:23 crc kubenswrapper[4650]: E0201 07:23:23.531866 4650 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.51:6443: connect: connection refused" node="crc" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.850308 4650 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Feb 01 07:23:23 crc kubenswrapper[4650]: E0201 07:23:23.851234 4650 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.869436 4650 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.870604 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 05:17:57.577569294 +0000 UTC Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.983398 4650 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="ae08477a377b8a9970bdae5945412b83b02d682662656bba65f1a285b5a4e01a" exitCode=0 Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.983513 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"ae08477a377b8a9970bdae5945412b83b02d682662656bba65f1a285b5a4e01a"} Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.983643 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 
07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.984811 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.984857 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.984876 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.990356 4650 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e" exitCode=0 Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.990434 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e"} Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.990853 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.994419 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.994477 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.995180 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.996440 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1"} Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.996487 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d"} Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.996507 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b"} Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.999504 4650 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d" exitCode=0 Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.999592 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d"} Feb 01 07:23:23 crc kubenswrapper[4650]: I0201 07:23:23.999854 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 
07:23:24.001067 4650 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="a5aa63dcc54aa1e3865f7264a674a0369cd70fa99a76905bbf6d261193d05a4f" exitCode=0 Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.001113 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"a5aa63dcc54aa1e3865f7264a674a0369cd70fa99a76905bbf6d261193d05a4f"} Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.001214 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.001187 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.001271 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.001281 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.002476 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.002497 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.002511 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.003543 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.004665 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.004691 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.004699 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:24 crc kubenswrapper[4650]: W0201 07:23:24.851232 4650 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:24 crc kubenswrapper[4650]: E0201 07:23:24.851320 4650 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.870564 4650 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:24 crc kubenswrapper[4650]: I0201 07:23:24.871314 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate 
expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 08:45:58.514767931 +0000 UTC Feb 01 07:23:24 crc kubenswrapper[4650]: E0201 07:23:24.874313 4650 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="3.2s" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.004267 4650 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="ca7ad6baa9756be131a6d29e2c3d9067f73306fe82d654e0808d7fa717085e82" exitCode=0 Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.004336 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"ca7ad6baa9756be131a6d29e2c3d9067f73306fe82d654e0808d7fa717085e82"} Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.004485 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.005447 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.005478 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.005490 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.007707 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.007736 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"de423d103d88032359f0901b1ae4f11fe931dfaafbc8842536e22f9b73a2343b"} Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.008392 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.008425 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.008437 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.010985 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"2061d447222e30385d520b018ccbb80b372ecbedf5b18b5e60863abfe2ed3ed4"} Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.011056 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"29a24bc997c46ab92b11fd78aa3b2f091e944923b74dd7970a3f68a9070d8553"} Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.011064 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.011071 
4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"d41b74922a03b7e0181459f5ceae7d0fa3f44d22343e7060375b909547ca718b"} Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.011680 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.011716 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.011728 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.013154 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61"} Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.013165 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.013809 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.013828 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.013837 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.018267 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361"} Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.018304 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28"} Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.018319 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e"} Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.018334 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c"} Feb 01 07:23:25 crc kubenswrapper[4650]: W0201 07:23:25.070902 4650 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Feb 01 07:23:25 crc kubenswrapper[4650]: E0201 07:23:25.070981 4650 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch 
*v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.132607 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.133534 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.133566 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.133578 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.133598 4650 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 01 07:23:25 crc kubenswrapper[4650]: E0201 07:23:25.134039 4650 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.51:6443: connect: connection refused" node="crc" Feb 01 07:23:25 crc kubenswrapper[4650]: I0201 07:23:25.872444 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 11:15:11.001103044 +0000 UTC Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.029970 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29"} Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.031133 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.032293 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.032342 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.032360 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.035260 4650 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="75bb7bb516c4ba655a7e17cf1bb6159c33c2cdacc4fe7da56bf28b79df4893bd" exitCode=0 Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.035419 4650 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.035431 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.035473 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.035530 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.035530 4650 kubelet_node_status.go:401] "Setting node 
annotation to enable volume controller attach/detach" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.035775 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"75bb7bb516c4ba655a7e17cf1bb6159c33c2cdacc4fe7da56bf28b79df4893bd"} Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.037414 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.037454 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.037470 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.037480 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.037510 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.037529 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.037426 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.037584 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.037600 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.037677 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.037696 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.037712 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:26 crc kubenswrapper[4650]: I0201 07:23:26.873357 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 14:40:56.25490676 +0000 UTC Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.042975 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ca2252d7d2c7b200da374f9b537472556436856e8a048bb3c4d85d71b474c1ee"} Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.043011 4650 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.043098 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.043016 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"910ec686da1b92d850607d6b7e8440a8161587dbc664e2b7eaa019d3610e7e49"} Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.044297 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.044343 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.044388 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.685018 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.685312 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.687733 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.687799 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.687820 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.696408 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.874478 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 20:27:14.782544745 +0000 UTC Feb 01 07:23:27 crc kubenswrapper[4650]: I0201 07:23:27.949929 4650 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.053308 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b85cce18d0dde8ecd1aae782f56766ea995b69e891ec7880f5cf12133f8740ec"} Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.053378 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"53565e4e6e9bd72395f7216877be07928e66899c4c70bc20a14c05b3c37fee26"} Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.053390 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.053426 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.053401 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9a9b4419798ee164052de5c30996fd93209ae532d70838d4086f7bab2c5766c2"} Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.054953 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.055000 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.055348 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.055378 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.055396 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.056115 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.184466 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.185062 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.186757 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.186990 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.187165 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.335128 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.336857 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.337094 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.337241 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.337398 4650 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.499259 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.499568 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.501170 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.501220 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.501238 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:28 crc kubenswrapper[4650]: I0201 07:23:28.875345 4650 certificate_manager.go:356] 
kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 06:31:23.718926971 +0000 UTC Feb 01 07:23:29 crc kubenswrapper[4650]: I0201 07:23:29.055719 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:29 crc kubenswrapper[4650]: I0201 07:23:29.056966 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:29 crc kubenswrapper[4650]: I0201 07:23:29.057022 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:29 crc kubenswrapper[4650]: I0201 07:23:29.057091 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:29 crc kubenswrapper[4650]: I0201 07:23:29.136978 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:29 crc kubenswrapper[4650]: I0201 07:23:29.137269 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:29 crc kubenswrapper[4650]: I0201 07:23:29.138360 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:29 crc kubenswrapper[4650]: I0201 07:23:29.138410 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:29 crc kubenswrapper[4650]: I0201 07:23:29.138428 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:29 crc kubenswrapper[4650]: I0201 07:23:29.721227 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:29 crc kubenswrapper[4650]: I0201 07:23:29.876101 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 22:56:24.26223166 +0000 UTC Feb 01 07:23:30 crc kubenswrapper[4650]: I0201 07:23:30.058988 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:30 crc kubenswrapper[4650]: I0201 07:23:30.060326 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:30 crc kubenswrapper[4650]: I0201 07:23:30.060373 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:30 crc kubenswrapper[4650]: I0201 07:23:30.060390 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:30 crc kubenswrapper[4650]: I0201 07:23:30.251246 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:30 crc kubenswrapper[4650]: I0201 07:23:30.251469 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:30 crc kubenswrapper[4650]: I0201 07:23:30.252965 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:30 crc kubenswrapper[4650]: I0201 07:23:30.253015 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:30 crc 
kubenswrapper[4650]: I0201 07:23:30.253068 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:30 crc kubenswrapper[4650]: I0201 07:23:30.876783 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 15:32:55.895300253 +0000 UTC Feb 01 07:23:31 crc kubenswrapper[4650]: I0201 07:23:31.265108 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Feb 01 07:23:31 crc kubenswrapper[4650]: I0201 07:23:31.265381 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:31 crc kubenswrapper[4650]: I0201 07:23:31.266991 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:31 crc kubenswrapper[4650]: I0201 07:23:31.267068 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:31 crc kubenswrapper[4650]: I0201 07:23:31.267086 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:31 crc kubenswrapper[4650]: I0201 07:23:31.720415 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:31 crc kubenswrapper[4650]: I0201 07:23:31.720642 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:31 crc kubenswrapper[4650]: I0201 07:23:31.721939 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:31 crc kubenswrapper[4650]: I0201 07:23:31.721983 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:31 crc kubenswrapper[4650]: I0201 07:23:31.722001 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:31 crc kubenswrapper[4650]: I0201 07:23:31.877901 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 16:15:37.783912907 +0000 UTC Feb 01 07:23:32 crc kubenswrapper[4650]: E0201 07:23:32.029612 4650 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 01 07:23:32 crc kubenswrapper[4650]: I0201 07:23:32.878748 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 09:08:16.449967031 +0000 UTC Feb 01 07:23:32 crc kubenswrapper[4650]: I0201 07:23:32.987391 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:32 crc kubenswrapper[4650]: I0201 07:23:32.987589 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:32 crc kubenswrapper[4650]: I0201 07:23:32.989340 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:32 crc kubenswrapper[4650]: I0201 07:23:32.989392 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:32 crc 
kubenswrapper[4650]: I0201 07:23:32.989412 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:33 crc kubenswrapper[4650]: I0201 07:23:33.879696 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 20:49:39.61612191 +0000 UTC Feb 01 07:23:34 crc kubenswrapper[4650]: I0201 07:23:34.880268 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 01:11:21.409148813 +0000 UTC Feb 01 07:23:35 crc kubenswrapper[4650]: W0201 07:23:35.803240 4650 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout Feb 01 07:23:35 crc kubenswrapper[4650]: I0201 07:23:35.803335 4650 trace.go:236] Trace[1743166925]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (01-Feb-2026 07:23:25.801) (total time: 10001ms): Feb 01 07:23:35 crc kubenswrapper[4650]: Trace[1743166925]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (07:23:35.803) Feb 01 07:23:35 crc kubenswrapper[4650]: Trace[1743166925]: [10.001913948s] [10.001913948s] END Feb 01 07:23:35 crc kubenswrapper[4650]: E0201 07:23:35.803358 4650 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Feb 01 07:23:35 crc kubenswrapper[4650]: I0201 07:23:35.870472 4650 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Feb 01 07:23:35 crc kubenswrapper[4650]: I0201 07:23:35.880739 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 10:43:38.468837455 +0000 UTC Feb 01 07:23:35 crc kubenswrapper[4650]: I0201 07:23:35.987426 4650 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 01 07:23:35 crc kubenswrapper[4650]: I0201 07:23:35.988614 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 01 07:23:36 crc kubenswrapper[4650]: W0201 07:23:36.435566 4650 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: 
TLS handshake timeout Feb 01 07:23:36 crc kubenswrapper[4650]: I0201 07:23:36.435761 4650 trace.go:236] Trace[201502366]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (01-Feb-2026 07:23:26.433) (total time: 10002ms): Feb 01 07:23:36 crc kubenswrapper[4650]: Trace[201502366]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10002ms (07:23:36.435) Feb 01 07:23:36 crc kubenswrapper[4650]: Trace[201502366]: [10.002238897s] [10.002238897s] END Feb 01 07:23:36 crc kubenswrapper[4650]: E0201 07:23:36.435801 4650 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Feb 01 07:23:36 crc kubenswrapper[4650]: I0201 07:23:36.527551 4650 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\": RBAC: [clusterrole.rbac.authorization.k8s.io \"system:openshift:public-info-viewer\" not found, clusterrole.rbac.authorization.k8s.io \"system:public-info-viewer\" not found]","reason":"Forbidden","details":{},"code":403} Feb 01 07:23:36 crc kubenswrapper[4650]: I0201 07:23:36.527806 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Feb 01 07:23:36 crc kubenswrapper[4650]: I0201 07:23:36.535420 4650 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\": RBAC: [clusterrole.rbac.authorization.k8s.io \"system:openshift:public-info-viewer\" not found, clusterrole.rbac.authorization.k8s.io \"system:public-info-viewer\" not found]","reason":"Forbidden","details":{},"code":403} Feb 01 07:23:36 crc kubenswrapper[4650]: I0201 07:23:36.535494 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Feb 01 07:23:36 crc kubenswrapper[4650]: I0201 07:23:36.883699 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 10:59:19.254846509 +0000 UTC Feb 01 07:23:37 crc kubenswrapper[4650]: I0201 07:23:37.805810 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Feb 01 07:23:37 crc kubenswrapper[4650]: I0201 07:23:37.806094 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:37 crc kubenswrapper[4650]: I0201 07:23:37.807801 4650 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:37 crc kubenswrapper[4650]: I0201 07:23:37.807857 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:37 crc kubenswrapper[4650]: I0201 07:23:37.807873 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:37 crc kubenswrapper[4650]: I0201 07:23:37.848745 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Feb 01 07:23:37 crc kubenswrapper[4650]: I0201 07:23:37.885210 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 07:27:25.145847635 +0000 UTC Feb 01 07:23:38 crc kubenswrapper[4650]: I0201 07:23:38.085215 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:38 crc kubenswrapper[4650]: I0201 07:23:38.086656 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:38 crc kubenswrapper[4650]: I0201 07:23:38.086730 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:38 crc kubenswrapper[4650]: I0201 07:23:38.086758 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:38 crc kubenswrapper[4650]: I0201 07:23:38.101327 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Feb 01 07:23:38 crc kubenswrapper[4650]: I0201 07:23:38.886173 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 22:05:51.617339806 +0000 UTC Feb 01 07:23:39 crc kubenswrapper[4650]: I0201 07:23:39.088185 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:39 crc kubenswrapper[4650]: I0201 07:23:39.089652 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:39 crc kubenswrapper[4650]: I0201 07:23:39.089767 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:39 crc kubenswrapper[4650]: I0201 07:23:39.089790 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:39 crc kubenswrapper[4650]: I0201 07:23:39.351655 4650 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Feb 01 07:23:39 crc kubenswrapper[4650]: I0201 07:23:39.731037 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:39 crc kubenswrapper[4650]: I0201 07:23:39.731330 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:39 crc kubenswrapper[4650]: I0201 07:23:39.732707 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:39 crc kubenswrapper[4650]: I0201 07:23:39.732766 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:39 crc kubenswrapper[4650]: I0201 
07:23:39.732785 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:39 crc kubenswrapper[4650]: I0201 07:23:39.739089 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:39 crc kubenswrapper[4650]: I0201 07:23:39.886953 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 05:01:27.422837278 +0000 UTC Feb 01 07:23:40 crc kubenswrapper[4650]: I0201 07:23:40.090602 4650 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 01 07:23:40 crc kubenswrapper[4650]: I0201 07:23:40.090674 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:40 crc kubenswrapper[4650]: I0201 07:23:40.096993 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:40 crc kubenswrapper[4650]: I0201 07:23:40.097095 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:40 crc kubenswrapper[4650]: I0201 07:23:40.097120 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:40 crc kubenswrapper[4650]: I0201 07:23:40.259400 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:40 crc kubenswrapper[4650]: I0201 07:23:40.259599 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:40 crc kubenswrapper[4650]: I0201 07:23:40.260977 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:40 crc kubenswrapper[4650]: I0201 07:23:40.261036 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:40 crc kubenswrapper[4650]: I0201 07:23:40.261081 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:40 crc kubenswrapper[4650]: I0201 07:23:40.887665 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 15:16:37.587807607 +0000 UTC Feb 01 07:23:41 crc kubenswrapper[4650]: E0201 07:23:41.524680 4650 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.529370 4650 trace.go:236] Trace[585655355]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (01-Feb-2026 07:23:28.309) (total time: 13220ms): Feb 01 07:23:41 crc kubenswrapper[4650]: Trace[585655355]: ---"Objects listed" error: 13220ms (07:23:41.529) Feb 01 07:23:41 crc kubenswrapper[4650]: Trace[585655355]: [13.220089157s] [13.220089157s] END Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.530223 4650 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.530166 4650 trace.go:236] Trace[1380061964]: "Reflector ListAndWatch" 
name:k8s.io/client-go/informers/factory.go:160 (01-Feb-2026 07:23:29.412) (total time: 12117ms): Feb 01 07:23:41 crc kubenswrapper[4650]: Trace[1380061964]: ---"Objects listed" error: 12117ms (07:23:41.530) Feb 01 07:23:41 crc kubenswrapper[4650]: Trace[1380061964]: [12.117423414s] [12.117423414s] END Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.531514 4650 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Feb 01 07:23:41 crc kubenswrapper[4650]: E0201 07:23:41.531612 4650 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.531458 4650 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.556612 4650 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.596935 4650 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:40270->192.168.126.11:17697: read: connection reset by peer" start-of-body= Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.597031 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:40270->192.168.126.11:17697: read: connection reset by peer" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.598335 4650 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:40278->192.168.126.11:17697: read: connection reset by peer" start-of-body= Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.600572 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:40278->192.168.126.11:17697: read: connection reset by peer" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.601532 4650 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.601632 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.864972 4650 apiserver.go:52] "Watching apiserver" Feb 01 07:23:41 crc 
kubenswrapper[4650]: I0201 07:23:41.869820 4650 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.870140 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"] Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.870481 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.870575 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.870654 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:41 crc kubenswrapper[4650]: E0201 07:23:41.870947 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:23:41 crc kubenswrapper[4650]: E0201 07:23:41.870975 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.871118 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.871451 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.871598 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:41 crc kubenswrapper[4650]: E0201 07:23:41.871839 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.878159 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.878518 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.878613 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.878735 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.879322 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.879537 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.883578 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.883637 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.886829 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.889416 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 17:12:18.778725131 +0000 UTC Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.909322 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.924708 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.936986 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.937095 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.937146 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.937182 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.937222 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.937245 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.938151 4650 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.950152 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.950820 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 01 07:23:41 crc kubenswrapper[4650]: E0201 07:23:41.954507 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:23:41 crc kubenswrapper[4650]: E0201 07:23:41.954542 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:23:41 crc kubenswrapper[4650]: E0201 07:23:41.954557 4650 projected.go:194] Error preparing data for 
projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:41 crc kubenswrapper[4650]: E0201 07:23:41.954632 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:42.454611263 +0000 UTC m=+21.177709508 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:41 crc kubenswrapper[4650]: E0201 07:23:41.954909 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:23:41 crc kubenswrapper[4650]: E0201 07:23:41.954931 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:23:41 crc kubenswrapper[4650]: E0201 07:23:41.954943 4650 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:41 crc kubenswrapper[4650]: E0201 07:23:41.954985 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:42.454972353 +0000 UTC m=+21.178070598 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.955689 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.965851 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.970720 4650 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.984808 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:41 crc kubenswrapper[4650]: I0201 07:23:41.994926 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.003802 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.014181 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.024575 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.037875 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.038099 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.038182 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.038274 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.038353 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.038426 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.038517 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: 
\"5b88f790-22fa-440e-b583-365168c0b23d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.038588 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.038662 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.038747 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.038823 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.038895 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.038968 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.039061 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.039133 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.039214 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.039376 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: 
\"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.039450 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.039522 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.039595 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.039671 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.039763 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.039837 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.039913 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.039982 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.040075 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.040154 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.040228 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.040309 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.040385 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.040451 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.040525 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.040599 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.040670 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.040762 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.040843 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.040919 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod 
\"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.040989 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.041079 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.041161 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.041239 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.041329 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.041397 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.041473 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.041547 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.041612 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.041686 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.041754 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.041843 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.041938 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042018 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042140 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042221 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042383 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042472 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042546 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042617 4650 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042688 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042760 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042841 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042917 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042986 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.043077 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.043159 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.043235 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.043310 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.043448 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.043551 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.043650 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.043750 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.043856 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.043958 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.044079 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.044190 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.044295 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.044402 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.044631 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.044740 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.044842 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.044938 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045029 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045152 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045288 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045376 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045482 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045579 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045655 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045752 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045833 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045910 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046002 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046190 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046360 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046405 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046430 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046451 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046476 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046497 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046519 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046543 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046566 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046588 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046610 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046631 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046650 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046670 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 
07:23:42.046706 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046727 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046758 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046777 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046795 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046814 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046836 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046858 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046882 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046905 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: 
\"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046927 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046947 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046968 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046988 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047009 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047078 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047108 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047132 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047152 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047174 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047192 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047213 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047231 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047249 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047270 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047289 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047309 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047332 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047360 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047384 4650 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047407 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047433 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047483 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047502 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047525 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047543 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047561 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047581 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047602 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 
07:23:42.047622 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047645 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047667 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047714 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047735 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047754 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047773 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047796 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047816 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047840 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:23:42 
crc kubenswrapper[4650]: I0201 07:23:42.047862 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047882 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047899 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047921 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047939 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047958 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047979 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048004 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048024 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048045 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 01 07:23:42 crc 
kubenswrapper[4650]: I0201 07:23:42.048080 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048103 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048123 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048144 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048165 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048185 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048207 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048227 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048247 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048268 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: 
\"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048287 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048308 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048327 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048351 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048371 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048390 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048412 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048434 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048455 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048474 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " 
Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048494 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048512 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048532 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048550 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048572 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048592 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048610 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048636 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048659 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048682 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod 
\"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048737 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048793 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048818 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048843 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048901 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048927 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048953 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048975 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048999 4650 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.049040 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.106834 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.108238 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.041582 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042014 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042328 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042486 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.042986 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.043140 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.043472 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.043698 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.044134 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.044182 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.044325 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.044773 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045096 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045190 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045697 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045718 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.045810 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046132 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046279 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046313 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.046823 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047396 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047490 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047574 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.047967 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048313 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048390 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048410 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048603 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048704 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.048931 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.049013 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.049129 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.049318 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.049727 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.050156 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.050180 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.050476 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.050757 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.050956 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.051204 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.052443 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.054330 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.054526 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.054722 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). 
InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.054911 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.055128 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.055311 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.055473 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.055683 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.055895 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.056091 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.056584 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.056923 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.057160 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.057320 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.057472 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.057637 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.057795 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.066335 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.066631 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.066771 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.067818 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.068002 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.068298 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.074234 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.074564 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.074797 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). 
InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.075454 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.075787 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.076180 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.076518 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.076957 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.080893 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.081209 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.081313 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.081597 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.081670 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.082166 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.082292 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.082387 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.082518 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.082723 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.083840 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.084032 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.084065 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.084182 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.084396 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.084716 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.084949 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.085214 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.085232 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.090216 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.090706 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.090868 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.091339 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.091545 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.091861 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.092116 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). 
InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.092256 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.092412 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.092602 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.092763 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.092922 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.093100 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.093237 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.093643 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.094504 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.095176 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.095447 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.096344 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.096546 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.096729 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.097108 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.097257 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.097398 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.097550 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.097694 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.097848 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.098032 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.099137 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.099308 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.099671 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.100189 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.100495 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.101082 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.101193 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.101401 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.101409 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.102231 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). 
InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.102618 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.103390 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.103396 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.103687 4650 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.103750 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.105397 4650 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.106947 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.103417 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.107506 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.107620 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.107821 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.107840 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.108064 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.108119 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.108279 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.108739 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.108760 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.108778 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.108885 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.108999 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.109678 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.109696 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.109811 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.109842 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.109897 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.110101 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.110156 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.110348 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.110538 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.110542 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.110559 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.116769 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.116899 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:23:42.616876914 +0000 UTC m=+21.339975249 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.127451 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:42.627430964 +0000 UTC m=+21.350529209 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.127464 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:42.627458135 +0000 UTC m=+21.350556380 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.116903 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.117365 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.117609 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.118145 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.118636 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.118836 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.119116 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.120026 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.120830 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.121512 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.121811 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.126107 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.127581 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.127717 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.127986 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.128086 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.129656 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.129804 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.130154 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.111189 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.110661 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.111256 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.113635 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.130350 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.130856 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.131037 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.132623 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.132645 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.132909 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.133760 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.134313 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.135192 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.135322 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.138926 4650 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29" exitCode=255 Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.138958 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.138988 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29"} Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.139451 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.141877 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.142178 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149223 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149674 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149751 4650 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149765 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149777 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149787 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149797 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149811 4650 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149820 4650 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149831 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Feb 01 
07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149839 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149850 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149859 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149870 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149879 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149905 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149915 4650 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149924 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149933 4650 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149943 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149952 4650 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149960 4650 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149969 4650 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 
07:23:42.149977 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149986 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.149997 4650 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150006 4650 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150015 4650 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150027 4650 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150035 4650 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150070 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150080 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150089 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150100 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150109 4650 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150118 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150128 4650 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: 
\"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150138 4650 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150147 4650 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150155 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150166 4650 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150175 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150184 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150193 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150201 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150210 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150219 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150227 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150236 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150246 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150254 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150262 4650 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150271 4650 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150279 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150288 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150297 4650 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150306 4650 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150314 4650 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150322 4650 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150331 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150339 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150349 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150358 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150367 4650 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150376 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150384 4650 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150392 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150401 4650 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150409 4650 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150417 4650 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150426 4650 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150434 4650 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150442 4650 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150450 4650 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150461 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150470 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150479 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150487 4650 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150496 4650 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150505 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150513 4650 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150521 4650 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150530 4650 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150538 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150548 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150556 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150564 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150572 4650 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150581 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: 
\"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150589 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150597 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150605 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150614 4650 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150621 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150631 4650 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150639 4650 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150648 4650 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150659 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150668 4650 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150677 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150685 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150694 4650 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150703 4650 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150711 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150720 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150728 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150736 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150745 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150753 4650 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150762 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150771 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150779 4650 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150786 4650 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150795 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150804 4650 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150813 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150821 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150829 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150838 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150848 4650 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150857 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150865 4650 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150873 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150881 4650 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150890 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150898 4650 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150906 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150914 4650 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150921 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150930 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150939 4650 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150946 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150954 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150963 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150971 4650 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150979 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150988 4650 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.150998 4650 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151008 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151016 4650 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" 
(UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151027 4650 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151036 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151057 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151066 4650 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151075 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151083 4650 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151093 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151101 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151110 4650 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151119 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151127 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151136 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151144 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: 
\"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151152 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151161 4650 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151169 4650 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151177 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151186 4650 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151194 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151202 4650 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151210 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151220 4650 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151228 4650 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151237 4650 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151245 4650 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151253 4650 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151261 4650 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151270 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151280 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151289 4650 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151298 4650 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151306 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151315 4650 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151323 4650 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151332 4650 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151340 4650 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151348 4650 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151357 4650 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151364 4650 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151373 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151381 4650 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151389 4650 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151401 4650 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151411 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151424 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151436 4650 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151446 4650 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151457 4650 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151469 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151478 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151487 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151495 4650 reconciler_common.go:293] "Volume detached for volume 
\"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.151602 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.165325 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.165476 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.173090 4650 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.175942 4650 scope.go:117] "RemoveContainer" containerID="de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.176858 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.184246 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.188131 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.190065 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.193905 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.196449 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.199145 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.220431 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 01 07:23:42 crc kubenswrapper[4650]: W0201 07:23:42.230332 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-1b4120c1383033a1d4225171195a40393d6e3fe43397691111016bbf7d44643b WatchSource:0}: Error finding container 1b4120c1383033a1d4225171195a40393d6e3fe43397691111016bbf7d44643b: Status 404 returned error can't find the container with id 1b4120c1383033a1d4225171195a40393d6e3fe43397691111016bbf7d44643b Feb 01 07:23:42 crc kubenswrapper[4650]: W0201 07:23:42.234970 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-1656d793965b0823746eda1b34a26677054289bcbf91406c96a573bf6540ace3 WatchSource:0}: Error finding container 1656d793965b0823746eda1b34a26677054289bcbf91406c96a573bf6540ace3: Status 404 returned error can't find the container with id 1656d793965b0823746eda1b34a26677054289bcbf91406c96a573bf6540ace3 Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.247370 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.255770 4650 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.255801 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.255811 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.255822 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.256729 4650 csr.go:261] certificate signing request csr-rr66b is approved, waiting to be issued Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.315579 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.324639 4650 csr.go:257] certificate signing request csr-rr66b is issued Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.360535 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.423656 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.440360 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.473791 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.473849 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.473975 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.473993 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.474006 4650 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.474073 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:43.474057181 +0000 UTC m=+22.197155426 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.474582 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.474599 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.474610 4650 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.474642 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:43.474632875 +0000 UTC m=+22.197731120 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.675381 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.675825 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.675853 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.675932 4650 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object 
"openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.675941 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:23:43.675860533 +0000 UTC m=+22.398958808 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.676056 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:43.676005187 +0000 UTC m=+22.399103692 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.676346 4650 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.676501 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:43.676485709 +0000 UTC m=+22.399583974 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.844563 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-nlgpt"] Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.844821 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-nlgpt" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.849042 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.849195 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.852454 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.867578 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.879933 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.890151 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 08:42:36.116042037 +0000 UTC Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.913827 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.936075 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.960686 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.964920 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:42 crc kubenswrapper[4650]: E0201 07:23:42.965150 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.976498 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.978945 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/478bcf82-4ee9-40c3-af5e-eb4731802b79-hosts-file\") pod \"node-resolver-nlgpt\" (UID: \"478bcf82-4ee9-40c3-af5e-eb4731802b79\") " pod="openshift-dns/node-resolver-nlgpt" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.979006 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72x9j\" (UniqueName: \"kubernetes.io/projected/478bcf82-4ee9-40c3-af5e-eb4731802b79-kube-api-access-72x9j\") pod \"node-resolver-nlgpt\" (UID: \"478bcf82-4ee9-40c3-af5e-eb4731802b79\") " pod="openshift-dns/node-resolver-nlgpt" Feb 01 07:23:42 crc kubenswrapper[4650]: I0201 07:23:42.992663 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.003342 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.046530 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.051073 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.066273 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.074873 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.079532 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/478bcf82-4ee9-40c3-af5e-eb4731802b79-hosts-file\") pod \"node-resolver-nlgpt\" (UID: \"478bcf82-4ee9-40c3-af5e-eb4731802b79\") " pod="openshift-dns/node-resolver-nlgpt" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.079600 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72x9j\" (UniqueName: \"kubernetes.io/projected/478bcf82-4ee9-40c3-af5e-eb4731802b79-kube-api-access-72x9j\") pod \"node-resolver-nlgpt\" (UID: \"478bcf82-4ee9-40c3-af5e-eb4731802b79\") " pod="openshift-dns/node-resolver-nlgpt" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.079754 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/478bcf82-4ee9-40c3-af5e-eb4731802b79-hosts-file\") pod \"node-resolver-nlgpt\" (UID: \"478bcf82-4ee9-40c3-af5e-eb4731802b79\") " pod="openshift-dns/node-resolver-nlgpt" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.086826 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.098466 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.101722 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72x9j\" (UniqueName: \"kubernetes.io/projected/478bcf82-4ee9-40c3-af5e-eb4731802b79-kube-api-access-72x9j\") pod \"node-resolver-nlgpt\" (UID: \"478bcf82-4ee9-40c3-af5e-eb4731802b79\") " pod="openshift-dns/node-resolver-nlgpt" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.133441 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01
T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.149923 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.151450 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.155491 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-nlgpt" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.155808 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f"} Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.156179 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.167377 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.172301 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"fd7a032331294815328c9bb36beda9eeb580acb20e05d4cb71e8c4c243a00e09"} Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.184928 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.194761 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332"} Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.194832 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372"} Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.194849 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"1656d793965b0823746eda1b34a26677054289bcbf91406c96a573bf6540ace3"} Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.203249 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.203331 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2"} Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.203359 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"1b4120c1383033a1d4225171195a40393d6e3fe43397691111016bbf7d44643b"} Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.222544 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.238103 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.251191 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-xfq9r"] Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.251556 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:43 crc kubenswrapper[4650]: W0201 07:23:43.257701 4650 reflector.go:561] object-"openshift-machine-config-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.257741 4650 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Feb 01 07:23:43 crc kubenswrapper[4650]: W0201 07:23:43.257709 4650 reflector.go:561] object-"openshift-machine-config-operator"/"kube-rbac-proxy": failed to list *v1.ConfigMap: configmaps "kube-rbac-proxy" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.257770 4650 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"kube-rbac-proxy\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-rbac-proxy\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Feb 01 07:23:43 crc kubenswrapper[4650]: W0201 07:23:43.257795 4650 reflector.go:561] object-"openshift-machine-config-operator"/"proxy-tls": failed to list *v1.Secret: secrets "proxy-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-config-operator": no relationship found 
between node 'crc' and this object Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.257816 4650 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"proxy-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"proxy-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Feb 01 07:23:43 crc kubenswrapper[4650]: W0201 07:23:43.258197 4650 reflector.go:561] object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq": failed to list *v1.Secret: secrets "machine-config-daemon-dockercfg-r5tcq" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.258347 4650 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"machine-config-daemon-dockercfg-r5tcq\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"machine-config-daemon-dockercfg-r5tcq\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Feb 01 07:23:43 crc kubenswrapper[4650]: W0201 07:23:43.258603 4650 reflector.go:561] object-"openshift-machine-config-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.258733 4650 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.261106 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-bvkr8"] Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.262005 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.266105 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.266306 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.266461 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.266841 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.268606 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.270565 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.289385 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.314342 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.325675 4650 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-02-01 07:18:42 +0000 UTC, rotation deadline is 2026-11-19 13:51:33.132747005 +0000 UTC Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.325737 4650 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6990h27m49.807013289s for next certificate rotation Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.334453 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.350677 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.370452 4650 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.382915 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8dd1b5da-94bb-4bf2-8fed-958df80a8806-mcd-auth-proxy-config\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.382970 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-cni-binary-copy\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.382988 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: 
\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.383007 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lt7jl\" (UniqueName: \"kubernetes.io/projected/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-kube-api-access-lt7jl\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.383029 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8dd1b5da-94bb-4bf2-8fed-958df80a8806-proxy-tls\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.383068 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-os-release\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.383086 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.383126 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-cnibin\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.383154 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-system-cni-dir\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.383173 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/8dd1b5da-94bb-4bf2-8fed-958df80a8806-rootfs\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.383188 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzwg6\" (UniqueName: \"kubernetes.io/projected/8dd1b5da-94bb-4bf2-8fed-958df80a8806-kube-api-access-xzwg6\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:43 crc 
kubenswrapper[4650]: I0201 07:23:43.385394 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.399722 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.416200 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.442467 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.471017 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.484167 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.484399 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8dd1b5da-94bb-4bf2-8fed-958df80a8806-mcd-auth-proxy-config\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.484639 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-cni-binary-copy\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.484677 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.484700 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lt7jl\" (UniqueName: \"kubernetes.io/projected/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-kube-api-access-lt7jl\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.484737 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8dd1b5da-94bb-4bf2-8fed-958df80a8806-proxy-tls\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.484778 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.484822 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-os-release\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.484842 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.485447 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.485489 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.485503 4650 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.485547 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.485576 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:45.485556254 +0000 UTC m=+24.208654499 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.485661 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-cnibin\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.485742 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-system-cni-dir\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.485751 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.485777 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.485763 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/8dd1b5da-94bb-4bf2-8fed-958df80a8806-rootfs\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.485793 4650 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.485811 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzwg6\" (UniqueName: \"kubernetes.io/projected/8dd1b5da-94bb-4bf2-8fed-958df80a8806-kube-api-access-xzwg6\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.485859 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:45.485838731 +0000 UTC m=+24.208936976 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.485455 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-os-release\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.486302 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-cni-binary-copy\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.486388 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.486422 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-cnibin\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.486394 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-system-cni-dir\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.486488 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/8dd1b5da-94bb-4bf2-8fed-958df80a8806-rootfs\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.488704 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.501319 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.507345 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lt7jl\" (UniqueName: \"kubernetes.io/projected/f0ea3e95-72a7-4a87-ab12-6c31f7befe3b-kube-api-access-lt7jl\") pod \"multus-additional-cni-plugins-bvkr8\" (UID: \"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\") " pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.509787 4650 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.520836 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.530758 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.538189 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.551621 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:43Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.576925 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" Feb 01 07:23:43 crc kubenswrapper[4650]: W0201 07:23:43.589381 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf0ea3e95_72a7_4a87_ab12_6c31f7befe3b.slice/crio-7af083842a01410bc3a13669dd4d148f2762c0bedb360fd15949ba0f1000ed9e WatchSource:0}: Error finding container 7af083842a01410bc3a13669dd4d148f2762c0bedb360fd15949ba0f1000ed9e: Status 404 returned error can't find the container with id 7af083842a01410bc3a13669dd4d148f2762c0bedb360fd15949ba0f1000ed9e Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.661798 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-k6xtw"] Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.662334 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.669735 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.670371 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hm5cs"] Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.671272 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.671843 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.678281 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.678323 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.678558 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.678725 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.679019 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.679409 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.683592 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.687647 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.687734 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.687830 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.687976 4650 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" 
not registered Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.688057 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:45.688027663 +0000 UTC m=+24.411125908 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.688459 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:23:45.688450023 +0000 UTC m=+24.411548268 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.688516 4650 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.688545 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:45.688536066 +0000 UTC m=+24.411634311 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.697380 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\
\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:43Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.713827 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:43Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.745433 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:43Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.761279 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:43Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788417 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8x24c\" (UniqueName: \"kubernetes.io/projected/e408ebb2-07fc-4317-92d4-1316ece830fb-kube-api-access-8x24c\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788456 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-etc-openvswitch\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788552 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovnkube-config\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788581 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-run-netns\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788602 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-env-overrides\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788624 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-var-lib-cni-bin\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788639 4650 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-kubelet\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788654 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-run-netns\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788669 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-run-multus-certs\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788746 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/e408ebb2-07fc-4317-92d4-1316ece830fb-multus-daemon-config\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788796 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-systemd\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788823 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-var-lib-openvswitch\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788844 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-cni-netd\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788879 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovn-node-metrics-cert\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788930 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-multus-cni-dir\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc 
kubenswrapper[4650]: I0201 07:23:43.788966 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-openvswitch\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.788993 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvhzk\" (UniqueName: \"kubernetes.io/projected/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-kube-api-access-nvhzk\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789047 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-slash\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789070 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-cni-bin\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789099 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-multus-socket-dir-parent\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789115 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-var-lib-cni-multus\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789182 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-multus-conf-dir\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789201 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-systemd-units\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789243 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-run-ovn-kubernetes\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") 
" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789267 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789288 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-cnibin\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789307 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/e408ebb2-07fc-4317-92d4-1316ece830fb-cni-binary-copy\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789331 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-run-k8s-cni-cncf-io\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789350 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovnkube-script-lib\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789382 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-hostroot\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789410 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-os-release\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789437 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-etc-kubernetes\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789463 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-node-log\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789520 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-ovn\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789580 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-system-cni-dir\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789605 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-var-lib-kubelet\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.789625 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-log-socket\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.799587 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:43Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.815579 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:43Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.830146 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:43Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.844966 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:43Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.881430 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:43Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890240 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-run-ovn-kubernetes\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890282 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890322 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-multus-conf-dir\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890346 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-systemd-units\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890367 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-cnibin\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890385 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/e408ebb2-07fc-4317-92d4-1316ece830fb-cni-binary-copy\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890407 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-run-k8s-cni-cncf-io\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890423 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovnkube-script-lib\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890449 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-hostroot\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890450 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890468 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-os-release\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890488 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-etc-kubernetes\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890506 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-node-log\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890508 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-systemd-units\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890578 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-ovn\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890530 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-ovn\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890549 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 18:25:46.125491977 +0000 UTC Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890626 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-system-cni-dir\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890647 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-var-lib-kubelet\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890663 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-log-socket\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890669 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-os-release\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890682 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8x24c\" (UniqueName: \"kubernetes.io/projected/e408ebb2-07fc-4317-92d4-1316ece830fb-kube-api-access-8x24c\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890702 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-etc-openvswitch\") pod \"ovnkube-node-hm5cs\" (UID: 
\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890709 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-hostroot\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890701 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-cnibin\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890739 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-var-lib-kubelet\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890746 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-run-k8s-cni-cncf-io\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890705 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-etc-kubernetes\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890736 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovnkube-config\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890908 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-run-netns\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890927 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-etc-openvswitch\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890908 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-system-cni-dir\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890934 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-env-overrides\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890984 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-log-socket\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890990 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-run-netns\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890999 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-run-netns\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891047 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-run-netns\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891085 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-var-lib-cni-bin\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891112 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-kubelet\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891135 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-var-lib-cni-bin\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891155 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-kubelet\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891182 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-run-multus-certs\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " 
pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891214 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovn-node-metrics-cert\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891233 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/e408ebb2-07fc-4317-92d4-1316ece830fb-multus-daemon-config\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891287 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-node-log\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891321 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-run-multus-certs\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891359 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-systemd\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891380 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovnkube-script-lib\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891383 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-var-lib-openvswitch\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891427 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-var-lib-openvswitch\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891435 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-cni-netd\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891473 4650 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-cni-netd\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891509 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-multus-cni-dir\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891530 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-openvswitch\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891564 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvhzk\" (UniqueName: \"kubernetes.io/projected/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-kube-api-access-nvhzk\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891586 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-slash\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891626 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-cni-bin\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891656 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovnkube-config\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891664 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-multus-socket-dir-parent\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891700 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-env-overrides\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891690 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/e408ebb2-07fc-4317-92d4-1316ece830fb-cni-binary-copy\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891746 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-var-lib-cni-multus\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891757 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-slash\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891782 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-systemd\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891824 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-multus-socket-dir-parent\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891831 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-host-var-lib-cni-multus\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891873 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-cni-bin\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891913 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/e408ebb2-07fc-4317-92d4-1316ece830fb-multus-daemon-config\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891890 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-openvswitch\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.891952 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-multus-cni-dir\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc 
kubenswrapper[4650]: I0201 07:23:43.891963 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/e408ebb2-07fc-4317-92d4-1316ece830fb-multus-conf-dir\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.890378 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-run-ovn-kubernetes\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.897554 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovn-node-metrics-cert\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.921461 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvhzk\" (UniqueName: \"kubernetes.io/projected/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-kube-api-access-nvhzk\") pod \"ovnkube-node-hm5cs\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.933317 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:43Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.934603 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8x24c\" (UniqueName: \"kubernetes.io/projected/e408ebb2-07fc-4317-92d4-1316ece830fb-kube-api-access-8x24c\") pod \"multus-k6xtw\" (UID: \"e408ebb2-07fc-4317-92d4-1316ece830fb\") " pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.964716 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.964814 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.964884 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:23:43 crc kubenswrapper[4650]: E0201 07:23:43.964956 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.971555 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"las
tState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f67
13d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:43Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.971997 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.972519 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.974960 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.975649 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.978850 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.979527 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.980281 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-k6xtw" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.980696 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.981476 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.982477 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.983104 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.984075 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.986545 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.987040 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.987740 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.988552 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.989096 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.989956 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.990546 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.990901 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.991904 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Feb 01 07:23:43 crc kubenswrapper[4650]: I0201 07:23:43.992461 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.006300 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.007538 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.008089 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.009416 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.009913 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.014530 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.015288 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.016296 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.016931 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.017875 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.018398 4650 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.018505 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.023095 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.023672 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.024167 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.029882 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.031109 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.034504 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.035225 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.036359 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.036835 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" 
path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.037875 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.038661 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.039684 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.040370 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.041546 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.049794 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.050880 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.052335 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.052887 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.053919 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.055580 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.056448 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.057196 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.062260 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.072727 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.091445 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.140618 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.157063 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.173950 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.201492 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.208456 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-k6xtw" event={"ID":"e408ebb2-07fc-4317-92d4-1316ece830fb","Type":"ContainerStarted","Data":"36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98"} Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.208507 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-k6xtw" event={"ID":"e408ebb2-07fc-4317-92d4-1316ece830fb","Type":"ContainerStarted","Data":"af8ab6ba1fd47356179565cabe34c026bb9475ac9d8e82f59634af91f8133c16"} Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.211237 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-nlgpt" 
event={"ID":"478bcf82-4ee9-40c3-af5e-eb4731802b79","Type":"ContainerStarted","Data":"c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b"} Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.211307 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-nlgpt" event={"ID":"478bcf82-4ee9-40c3-af5e-eb4731802b79","Type":"ContainerStarted","Data":"2a73f17c161e828fde34e244df28f40dc1c10caf0e34181ae6ff6d949e6d2039"} Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.216677 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0"} Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.216718 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"86946137a5915665e78dfe221b65ae5970b1f8c9b74fda5a719c222c704f43d3"} Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.220668 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.222435 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" event={"ID":"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b","Type":"ContainerStarted","Data":"681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7"} Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.222551 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" event={"ID":"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b","Type":"ContainerStarted","Data":"7af083842a01410bc3a13669dd4d148f2762c0bedb360fd15949ba0f1000ed9e"} Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.238599 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cl
uster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.257901 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.282181 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.311615 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.328521 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.345899 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.376587 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.396381 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.417220 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.434696 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.453286 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/ru
n/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mount
Path\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.473530 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: E0201 07:23:44.486425 4650 configmap.go:193] Couldn't get configMap openshift-machine-config-operator/kube-rbac-proxy: failed to sync configmap cache: timed out waiting for the condition Feb 01 07:23:44 crc kubenswrapper[4650]: E0201 07:23:44.486535 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8dd1b5da-94bb-4bf2-8fed-958df80a8806-mcd-auth-proxy-config podName:8dd1b5da-94bb-4bf2-8fed-958df80a8806 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:44.986510298 +0000 UTC m=+23.709608543 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "mcd-auth-proxy-config" (UniqueName: "kubernetes.io/configmap/8dd1b5da-94bb-4bf2-8fed-958df80a8806-mcd-auth-proxy-config") pod "machine-config-daemon-xfq9r" (UID: "8dd1b5da-94bb-4bf2-8fed-958df80a8806") : failed to sync configmap cache: timed out waiting for the condition Feb 01 07:23:44 crc kubenswrapper[4650]: E0201 07:23:44.486813 4650 secret.go:188] Couldn't get secret openshift-machine-config-operator/proxy-tls: failed to sync secret cache: timed out waiting for the condition Feb 01 07:23:44 crc kubenswrapper[4650]: E0201 07:23:44.486853 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8dd1b5da-94bb-4bf2-8fed-958df80a8806-proxy-tls podName:8dd1b5da-94bb-4bf2-8fed-958df80a8806 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:44.986845186 +0000 UTC m=+23.709943431 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/8dd1b5da-94bb-4bf2-8fed-958df80a8806-proxy-tls") pod "machine-config-daemon-xfq9r" (UID: "8dd1b5da-94bb-4bf2-8fed-958df80a8806") : failed to sync secret cache: timed out waiting for the condition Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.496766 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Feb 01 07:23:44 crc kubenswrapper[4650]: E0201 07:23:44.509138 4650 projected.go:288] Couldn't get configMap openshift-machine-config-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Feb 01 07:23:44 crc kubenswrapper[4650]: E0201 07:23:44.509203 4650 projected.go:194] Error preparing data for projected volume kube-api-access-xzwg6 for pod openshift-machine-config-operator/machine-config-daemon-xfq9r: failed to sync configmap cache: timed out waiting for the condition Feb 01 07:23:44 crc kubenswrapper[4650]: E0201 07:23:44.509277 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8dd1b5da-94bb-4bf2-8fed-958df80a8806-kube-api-access-xzwg6 podName:8dd1b5da-94bb-4bf2-8fed-958df80a8806 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:45.00925666 +0000 UTC m=+23.732354905 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-xzwg6" (UniqueName: "kubernetes.io/projected/8dd1b5da-94bb-4bf2-8fed-958df80a8806-kube-api-access-xzwg6") pod "machine-config-daemon-xfq9r" (UID: "8dd1b5da-94bb-4bf2-8fed-958df80a8806") : failed to sync configmap cache: timed out waiting for the condition Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.551466 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.569003 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.584155 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.598673 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.605639 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.615654 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.630773 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.633323 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}}
,\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.672748 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.717114 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:44Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.776574 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.891116 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 20:28:36.322552443 +0000 UTC Feb 01 07:23:44 crc kubenswrapper[4650]: I0201 07:23:44.964276 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:44 crc kubenswrapper[4650]: E0201 07:23:44.964465 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.002600 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8dd1b5da-94bb-4bf2-8fed-958df80a8806-mcd-auth-proxy-config\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.002659 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8dd1b5da-94bb-4bf2-8fed-958df80a8806-proxy-tls\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.003798 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8dd1b5da-94bb-4bf2-8fed-958df80a8806-mcd-auth-proxy-config\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.012660 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8dd1b5da-94bb-4bf2-8fed-958df80a8806-proxy-tls\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.104053 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzwg6\" (UniqueName: \"kubernetes.io/projected/8dd1b5da-94bb-4bf2-8fed-958df80a8806-kube-api-access-xzwg6\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.107344 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzwg6\" (UniqueName: \"kubernetes.io/projected/8dd1b5da-94bb-4bf2-8fed-958df80a8806-kube-api-access-xzwg6\") pod \"machine-config-daemon-xfq9r\" (UID: \"8dd1b5da-94bb-4bf2-8fed-958df80a8806\") " pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.227520 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84"} Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.230552 4650 generic.go:334] "Generic (PLEG): container finished" podID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" 
containerID="31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0" exitCode=0 Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.230633 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0"} Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.230701 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba"} Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.230729 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e"} Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.230748 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad"} Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.230762 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5"} Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.230775 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5"} Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.230790 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1"} Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.231758 4650 generic.go:334] "Generic (PLEG): container finished" podID="f0ea3e95-72a7-4a87-ab12-6c31f7befe3b" containerID="681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7" exitCode=0 Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.231830 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" event={"ID":"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b","Type":"ContainerDied","Data":"681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7"} Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.267136 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.297211 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCou
nt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.315433 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.337183 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.352410 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.363918 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.365847 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:23:45 crc kubenswrapper[4650]: W0201 07:23:45.377854 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8dd1b5da_94bb_4bf2_8fed_958df80a8806.slice/crio-3552566c51ad16c92a4977b97f2ab9557ebf0890f5c860cead9d4cfec16cfec1 WatchSource:0}: Error finding container 3552566c51ad16c92a4977b97f2ab9557ebf0890f5c860cead9d4cfec16cfec1: Status 404 returned error can't find the container with id 3552566c51ad16c92a4977b97f2ab9557ebf0890f5c860cead9d4cfec16cfec1 Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.379225 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.400303 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.418716 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static
-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery 
information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.449606 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.466598 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.481786 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.503449 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.507782 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.507816 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.507944 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.507968 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.507980 4650 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.508028 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:49.508011598 +0000 UTC m=+28.231109843 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.508103 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.508152 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.508172 4650 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.508261 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:49.508235523 +0000 UTC m=+28.231333778 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.523770 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.537489 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.551491 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc 
kubenswrapper[4650]: I0201 07:23:45.576291 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"Po
dInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.590581 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.608613 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc 
kubenswrapper[4650]: I0201 07:23:45.622801 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.634684 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.648678 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.663403 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.677066 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.694457 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.709513 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.709857 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:23:49.70981325 +0000 UTC m=+28.432911505 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.710059 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.710112 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.710247 4650 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.710316 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:49.710304542 +0000 UTC m=+28.433402797 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.710839 4650 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.710883 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:49.710872867 +0000 UTC m=+28.433971122 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.711337 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:45Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.891377 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 17:33:31.892143776 +0000 UTC Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.965251 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:45 crc kubenswrapper[4650]: I0201 07:23:45.965316 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.965739 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:23:45 crc kubenswrapper[4650]: E0201 07:23:45.965851 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.237717 4650 generic.go:334] "Generic (PLEG): container finished" podID="f0ea3e95-72a7-4a87-ab12-6c31f7befe3b" containerID="566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972" exitCode=0 Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.237796 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" event={"ID":"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b","Type":"ContainerDied","Data":"566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972"} Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.240448 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487"} Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.240550 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1"} Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.240637 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"3552566c51ad16c92a4977b97f2ab9557ebf0890f5c860cead9d4cfec16cfec1"} Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.261477 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.285333 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.300404 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.325167 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.345688 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.362652 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.379442 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.401751 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.419349 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-gz868"] Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.419965 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-gz868" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.420874 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"rest
artCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b4
26b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.425881 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.426013 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.426085 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.426168 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.441766 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.452457 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.466617 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.496532 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-
01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.516882 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.518415 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7l7d\" (UniqueName: \"kubernetes.io/projected/b949fef4-4a92-4734-8edc-4c9f9b2515af-kube-api-access-f7l7d\") pod \"node-ca-gz868\" (UID: \"b949fef4-4a92-4734-8edc-4c9f9b2515af\") " pod="openshift-image-registry/node-ca-gz868" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.518487 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b949fef4-4a92-4734-8edc-4c9f9b2515af-host\") pod \"node-ca-gz868\" (UID: \"b949fef4-4a92-4734-8edc-4c9f9b2515af\") " pod="openshift-image-registry/node-ca-gz868" Feb 01 07:23:46 crc 
kubenswrapper[4650]: I0201 07:23:46.518551 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/b949fef4-4a92-4734-8edc-4c9f9b2515af-serviceca\") pod \"node-ca-gz868\" (UID: \"b949fef4-4a92-4734-8edc-4c9f9b2515af\") " pod="openshift-image-registry/node-ca-gz868" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.532473 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:
9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.545145 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.558837 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.573622 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.586708 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.598876 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.613408 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.619981 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7l7d\" (UniqueName: \"kubernetes.io/projected/b949fef4-4a92-4734-8edc-4c9f9b2515af-kube-api-access-f7l7d\") pod \"node-ca-gz868\" (UID: \"b949fef4-4a92-4734-8edc-4c9f9b2515af\") " pod="openshift-image-registry/node-ca-gz868" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.620053 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b949fef4-4a92-4734-8edc-4c9f9b2515af-host\") pod \"node-ca-gz868\" (UID: \"b949fef4-4a92-4734-8edc-4c9f9b2515af\") " pod="openshift-image-registry/node-ca-gz868" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.620077 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" 
(UniqueName: \"kubernetes.io/configmap/b949fef4-4a92-4734-8edc-4c9f9b2515af-serviceca\") pod \"node-ca-gz868\" (UID: \"b949fef4-4a92-4734-8edc-4c9f9b2515af\") " pod="openshift-image-registry/node-ca-gz868" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.620159 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b949fef4-4a92-4734-8edc-4c9f9b2515af-host\") pod \"node-ca-gz868\" (UID: \"b949fef4-4a92-4734-8edc-4c9f9b2515af\") " pod="openshift-image-registry/node-ca-gz868" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.622088 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/b949fef4-4a92-4734-8edc-4c9f9b2515af-serviceca\") pod \"node-ca-gz868\" (UID: \"b949fef4-4a92-4734-8edc-4c9f9b2515af\") " pod="openshift-image-registry/node-ca-gz868" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.634791 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 
2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.664214 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7l7d\" (UniqueName: \"kubernetes.io/projected/b949fef4-4a92-4734-8edc-4c9f9b2515af-kube-api-access-f7l7d\") pod \"node-ca-gz868\" (UID: \"b949fef4-4a92-4734-8edc-4c9f9b2515af\") " pod="openshift-image-registry/node-ca-gz868" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.697215 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"
192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.743707 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z 
is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.779173 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.833669 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.874257 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-02-01T07:23:46Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.892536 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 12:23:50.150893104 +0000 UTC Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.936940 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-gz868" Feb 01 07:23:46 crc kubenswrapper[4650]: I0201 07:23:46.965402 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:46 crc kubenswrapper[4650]: E0201 07:23:46.965530 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.253000 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004"} Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.256718 4650 generic.go:334] "Generic (PLEG): container finished" podID="f0ea3e95-72a7-4a87-ab12-6c31f7befe3b" containerID="f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4" exitCode=0 Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.256814 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" event={"ID":"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b","Type":"ContainerDied","Data":"f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4"} Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.266821 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-gz868" event={"ID":"b949fef4-4a92-4734-8edc-4c9f9b2515af","Type":"ContainerStarted","Data":"2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f"} Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.266908 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-gz868" event={"ID":"b949fef4-4a92-4734-8edc-4c9f9b2515af","Type":"ContainerStarted","Data":"d89c0e825a13e37b9ebcfebbf4682eefb785fb197750ea7005a37016458cf4a2"} Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.285562 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.303093 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.324291 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.338860 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.353031 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.373772 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.386758 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.418327 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.437378 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.462457 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.477062 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.492209 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.512578 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.527394 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.544920 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernete
s.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.571057 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z 
is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.588568 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.599587 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.614144 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.659093 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.706365 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.735833 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.781297 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.817777 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.858159 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.893593 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 12:37:59.829506752 +0000 UTC Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.901990 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde
9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.932466 4650 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.935758 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.935792 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.935804 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.935863 4650 kubelet_node_status.go:76] "Attempting to register node" node="crc" 
Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.937729 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-c
erts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:47Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.968224 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:47 crc kubenswrapper[4650]: E0201 07:23:47.968448 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.968544 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:47 crc kubenswrapper[4650]: E0201 07:23:47.968718 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.989097 4650 kubelet_node_status.go:115] "Node was previously registered" node="crc" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.989609 4650 kubelet_node_status.go:79] "Successfully registered node" node="crc" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.991109 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.991189 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.991213 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.991247 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:47 crc kubenswrapper[4650]: I0201 07:23:47.991270 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:47Z","lastTransitionTime":"2026-02-01T07:23:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: E0201 07:23:48.014700 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.018634 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.018665 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
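Note on the failed status patches in this stretch: each one terminates in the same TLS verification error. The network-node-identity webhook's serving certificate expired on 2025-08-24T17:21:41Z, so every Post to https://127.0.0.1:9743 is rejected before the patch is evaluated, and the kubelet keeps retrying with the identical payload. The check that produces this class of error is a plain validity-window comparison; below is a minimal Go sketch of it, with a placeholder certificate path since the log does not show where the webhook's certificate is mounted.

// certwindow.go: minimal sketch of the x509 validity-window check behind the
// "certificate has expired or is not yet valid" failures logged above.
// The PEM path is a placeholder, not taken from the log.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("/path/to/webhook-serving-cert.pem") // placeholder path
	if err != nil {
		fmt.Println("read cert:", err)
		return
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Println("no PEM block found")
		return
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Println("parse cert:", err)
		return
	}
	now := time.Now().UTC()
	switch {
	case now.Before(cert.NotBefore):
		fmt.Printf("certificate not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	case now.After(cert.NotAfter):
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	default:
		fmt.Println("certificate valid until", cert.NotAfter.UTC().Format(time.RFC3339))
	}
}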
event="NodeHasNoDiskPressure" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.018676 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.018699 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.018711 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:48Z","lastTransitionTime":"2026-02-01T07:23:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.018909 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: E0201 07:23:48.032222 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.037425 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.037459 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.037471 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.037492 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.037506 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:48Z","lastTransitionTime":"2026-02-01T07:23:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: E0201 07:23:48.052536 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.055896 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.055932 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.055946 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.055968 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.055980 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:48Z","lastTransitionTime":"2026-02-01T07:23:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: E0201 07:23:48.069971 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.074063 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.074091 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.074102 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.074123 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.074134 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:48Z","lastTransitionTime":"2026-02-01T07:23:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: E0201 07:23:48.086794 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: E0201 07:23:48.086917 4650 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.088439 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.088464 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.088476 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.088494 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.088507 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:48Z","lastTransitionTime":"2026-02-01T07:23:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.191453 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.191499 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.191515 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.191538 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.191553 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:48Z","lastTransitionTime":"2026-02-01T07:23:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.276849 4650 generic.go:334] "Generic (PLEG): container finished" podID="f0ea3e95-72a7-4a87-ab12-6c31f7befe3b" containerID="eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f" exitCode=0 Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.276922 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" event={"ID":"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b","Type":"ContainerDied","Data":"eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f"} Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.296682 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.296752 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.296771 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.296802 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.296822 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:48Z","lastTransitionTime":"2026-02-01T07:23:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.311641 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.336340 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\
"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.360955 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.383661 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.400616 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.400712 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.400990 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.401066 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.401095 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:48Z","lastTransitionTime":"2026-02-01T07:23:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.407952 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.429746 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.449083 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.469474 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.494494 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.503112 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.503167 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.503182 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.503226 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.503245 4650 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:48Z","lastTransitionTime":"2026-02-01T07:23:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.512397 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.525017 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.539098 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.565610 4650 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.584586 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:48Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.605760 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.605820 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.605837 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.605865 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.605881 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:48Z","lastTransitionTime":"2026-02-01T07:23:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.708745 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.708816 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.708839 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.708865 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.708885 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:48Z","lastTransitionTime":"2026-02-01T07:23:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.812289 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.812372 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.812580 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.812616 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.812642 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:48Z","lastTransitionTime":"2026-02-01T07:23:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.894689 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 14:55:48.543793535 +0000 UTC Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.916588 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.916647 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.916659 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.916678 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.916691 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:48Z","lastTransitionTime":"2026-02-01T07:23:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:48 crc kubenswrapper[4650]: I0201 07:23:48.964540 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:48 crc kubenswrapper[4650]: E0201 07:23:48.964729 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.020143 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.020229 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.020249 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.020278 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.020296 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:49Z","lastTransitionTime":"2026-02-01T07:23:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.123976 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.124083 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.124116 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.124141 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.124155 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:49Z","lastTransitionTime":"2026-02-01T07:23:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.227111 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.227171 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.227193 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.227219 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.227238 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:49Z","lastTransitionTime":"2026-02-01T07:23:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.294122 4650 generic.go:334] "Generic (PLEG): container finished" podID="f0ea3e95-72a7-4a87-ab12-6c31f7befe3b" containerID="fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013" exitCode=0 Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.294235 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" event={"ID":"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b","Type":"ContainerDied","Data":"fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013"} Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.358973 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.359037 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.359049 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.359068 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.359081 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:49Z","lastTransitionTime":"2026-02-01T07:23:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.359901 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:49Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.385364 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\
\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:49Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.410455 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:49Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.427411 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:49Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.445470 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:49Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.464705 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:49Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.467090 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.467200 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.467229 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.467308 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.467373 4650 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:49Z","lastTransitionTime":"2026-02-01T07:23:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.483675 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:49Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.502687 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:49Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.523859 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:49Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.543373 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:49Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.559716 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.559766 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.560047 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.560071 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.560082 4650 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.560135 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:57.560121939 +0000 UTC m=+36.283220184 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.560919 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.560942 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.560953 4650 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.560983 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:57.560974841 +0000 UTC m=+36.284073086 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.562306 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:49Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.573248 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:23:49Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.576015 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.576104 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.576123 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.576151 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.576170 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:49Z","lastTransitionTime":"2026-02-01T07:23:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.585713 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:49Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.607400 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:49Z 
is after 2025-08-24T17:21:41Z" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.678461 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.678516 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.678528 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.678551 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.678565 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:49Z","lastTransitionTime":"2026-02-01T07:23:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.761759 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.761888 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.761917 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.761956 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:23:57.761934351 +0000 UTC m=+36.485032596 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.761996 4650 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.762069 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:57.762057355 +0000 UTC m=+36.485155600 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.762166 4650 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.762293 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:57.76225709 +0000 UTC m=+36.485355365 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.781655 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.781702 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.781718 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.781737 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.781751 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:49Z","lastTransitionTime":"2026-02-01T07:23:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.884961 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.885013 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.885097 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.885124 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.885145 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:49Z","lastTransitionTime":"2026-02-01T07:23:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.896094 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 17:26:38.557861003 +0000 UTC Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.966658 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.966724 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.966861 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:23:49 crc kubenswrapper[4650]: E0201 07:23:49.966932 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.991098 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.991159 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.991174 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.991202 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:49 crc kubenswrapper[4650]: I0201 07:23:49.991218 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:49Z","lastTransitionTime":"2026-02-01T07:23:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.096109 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.096153 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.096173 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.096198 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.096216 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:50Z","lastTransitionTime":"2026-02-01T07:23:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.198626 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.198675 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.198688 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.198707 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.198722 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:50Z","lastTransitionTime":"2026-02-01T07:23:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.300510 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.300558 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.300570 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.300590 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.300604 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:50Z","lastTransitionTime":"2026-02-01T07:23:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.303054 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"b553911431da00f9f5e6f1fe702f0b453c7bff2eb8db8984a9ffa90c000669ef"} Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.303349 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.303381 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.310848 4650 generic.go:334] "Generic (PLEG): container finished" podID="f0ea3e95-72a7-4a87-ab12-6c31f7befe3b" containerID="71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7" exitCode=0 Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.310922 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" event={"ID":"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b","Type":"ContainerDied","Data":"71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7"} Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.323971 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.334534 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.336074 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.348224 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.369924 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.385958 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.402898 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.404127 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.404178 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.404188 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.404206 
4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.404218 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:50Z","lastTransitionTime":"2026-02-01T07:23:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.418587 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.438142 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.455427 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.471235 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.495159 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/v
ar/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141
633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553911431da00f9f5e6f1fe702f0b453c7bff2eb8db8984a9ffa90c000669ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitc
h\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.512607 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.513562 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.513612 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.513629 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.513654 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.513670 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:50Z","lastTransitionTime":"2026-02-01T07:23:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.533735 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7
jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.559484 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.572543 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.583444 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.593852 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.605435 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.617234 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.617308 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.617324 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.617345 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.617387 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:50Z","lastTransitionTime":"2026-02-01T07:23:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.623932 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553911431da00f9f5e6f1fe702f0b453c7bff2e
b8db8984a9ffa90c000669ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.635964 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.651309 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.662816 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.678358 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.695192 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.711539 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.720161 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.720211 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.720230 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.720261 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.720279 4650 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:50Z","lastTransitionTime":"2026-02-01T07:23:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.730721 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.748321 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.796418 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.820409 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:50Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.822593 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.822625 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.822638 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.822657 
4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.822670 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:50Z","lastTransitionTime":"2026-02-01T07:23:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.896308 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 13:33:30.251089262 +0000 UTC Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.925539 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.925605 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.925623 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.925652 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.925670 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:50Z","lastTransitionTime":"2026-02-01T07:23:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:50 crc kubenswrapper[4650]: I0201 07:23:50.964948 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:50 crc kubenswrapper[4650]: E0201 07:23:50.965127 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.029005 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.029098 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.029120 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.029147 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.029166 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:51Z","lastTransitionTime":"2026-02-01T07:23:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.133367 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.133418 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.133438 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.133462 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.133482 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:51Z","lastTransitionTime":"2026-02-01T07:23:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.236586 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.236690 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.236722 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.236750 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.236769 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:51Z","lastTransitionTime":"2026-02-01T07:23:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.327011 4650 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.327017 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" event={"ID":"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b","Type":"ContainerStarted","Data":"c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84"} Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.340279 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.340331 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.340368 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.340395 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.340413 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:51Z","lastTransitionTime":"2026-02-01T07:23:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.351080 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.368683 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.392013 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.414748 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.433657 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.443701 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.443735 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.443745 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.443766 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.443778 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:51Z","lastTransitionTime":"2026-02-01T07:23:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.455444 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.471266 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.496501 4650 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553911431da00f9f5e6f1fe702f0b453c7bff2eb8db8984a9ffa90c000669ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.519364 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.536253 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.546988 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.547062 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.547085 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.547110 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.547128 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:51Z","lastTransitionTime":"2026-02-01T07:23:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.560436 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.591299 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.613957 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.638507 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.655217 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.659863 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.659911 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.659924 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.659940 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.659952 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:51Z","lastTransitionTime":"2026-02-01T07:23:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.713725 4650 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.762922 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.762992 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.763004 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.763046 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.763059 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:51Z","lastTransitionTime":"2026-02-01T07:23:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.865531 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.865578 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.865587 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.865603 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.865613 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:51Z","lastTransitionTime":"2026-02-01T07:23:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.896937 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 06:40:39.243573282 +0000 UTC Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.964672 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:51 crc kubenswrapper[4650]: E0201 07:23:51.965009 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.964719 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:51 crc kubenswrapper[4650]: E0201 07:23:51.965397 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.967590 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.967660 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.967672 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.967691 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.967702 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:51Z","lastTransitionTime":"2026-02-01T07:23:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.982213 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:51 crc kubenswrapper[4650]: I0201 07:23:51.993433 4650 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:51Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.007575 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers 
with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:52Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.020401 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:52Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.037430 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:52Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.052726 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:52Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.066202 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:52Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.071592 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.071636 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.071646 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.071659 
4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.071669 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:52Z","lastTransitionTime":"2026-02-01T07:23:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.082095 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:52Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.112810 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:52Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.132770 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:52Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.145215 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:52Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.161005 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:52Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.174452 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.174670 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.174773 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.174898 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.175018 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:52Z","lastTransitionTime":"2026-02-01T07:23:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.182860 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553911431da00f9f5e6f1fe702f0b453c7bff2e
b8db8984a9ffa90c000669ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:52Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.198733 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:52Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.278130 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.278214 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.278242 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.278281 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.278312 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:52Z","lastTransitionTime":"2026-02-01T07:23:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.381820 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.381888 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.381907 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.381931 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.381945 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:52Z","lastTransitionTime":"2026-02-01T07:23:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.484325 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.484380 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.484394 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.484415 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.484434 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:52Z","lastTransitionTime":"2026-02-01T07:23:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.588268 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.588333 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.588348 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.588378 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.588396 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:52Z","lastTransitionTime":"2026-02-01T07:23:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.690980 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.691076 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.691101 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.691145 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.691165 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:52Z","lastTransitionTime":"2026-02-01T07:23:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.793971 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.794021 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.794071 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.794095 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.794113 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:52Z","lastTransitionTime":"2026-02-01T07:23:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.897068 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.897132 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.897150 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.897208 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.897230 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:52Z","lastTransitionTime":"2026-02-01T07:23:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.898310 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 16:53:15.000378187 +0000 UTC Feb 01 07:23:52 crc kubenswrapper[4650]: I0201 07:23:52.965285 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:52 crc kubenswrapper[4650]: E0201 07:23:52.965833 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.000581 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.000668 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.000686 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.000713 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.000765 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:53Z","lastTransitionTime":"2026-02-01T07:23:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.104445 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.104867 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.105141 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.105371 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.105601 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:53Z","lastTransitionTime":"2026-02-01T07:23:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.209215 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.209266 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.209280 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.209300 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.209318 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:53Z","lastTransitionTime":"2026-02-01T07:23:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.312404 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.312460 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.312472 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.312491 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.312507 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:53Z","lastTransitionTime":"2026-02-01T07:23:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.340364 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/0.log" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.344219 4650 generic.go:334] "Generic (PLEG): container finished" podID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerID="b553911431da00f9f5e6f1fe702f0b453c7bff2eb8db8984a9ffa90c000669ef" exitCode=1 Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.344277 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"b553911431da00f9f5e6f1fe702f0b453c7bff2eb8db8984a9ffa90c000669ef"} Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.345472 4650 scope.go:117] "RemoveContainer" containerID="b553911431da00f9f5e6f1fe702f0b453c7bff2eb8db8984a9ffa90c000669ef" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.371811 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\
\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.395281 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.416881 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.416948 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.416968 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.417623 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.418645 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:53Z","lastTransitionTime":"2026-02-01T07:23:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.420417 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.445046 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.476500 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.493991 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.523017 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.523063 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.523074 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.523091 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.523103 4650 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:53Z","lastTransitionTime":"2026-02-01T07:23:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.526886 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.547171 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.565403 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.599075 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553911431da00f9f5e6f1fe702f0b453c7bff2eb8db8984a9ffa90c000669ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b553911431da00f9f5e6f1fe702f0b453c7bff2eb8db8984a9ffa90c000669ef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:23:52Z\\\",\\\"message\\\":\\\"dler.go:208] Removed *v1.Node event handler 7\\\\nI0201 07:23:52.486506 5836 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0201 07:23:52.486515 5836 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0201 07:23:52.486522 5836 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0201 07:23:52.486529 5836 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0201 07:23:52.486536 5836 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0201 07:23:52.486539 5836 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0201 07:23:52.486543 5836 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0201 07:23:52.486621 5836 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0201 07:23:52.486910 5836 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0201 07:23:52.487706 5836 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0201 07:23:52.488120 5836 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.624726 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.627294 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.627358 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.627377 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.627402 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.627420 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:53Z","lastTransitionTime":"2026-02-01T07:23:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.652121 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.668848 4650 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.691316 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:53Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.729658 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.729712 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.729722 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.729742 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.729756 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:53Z","lastTransitionTime":"2026-02-01T07:23:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.832561 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.832645 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.832663 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.832693 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.832710 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:53Z","lastTransitionTime":"2026-02-01T07:23:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.899123 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 11:54:13.37419169 +0000 UTC Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.935415 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.935468 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.935484 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.935531 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.935550 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:53Z","lastTransitionTime":"2026-02-01T07:23:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.964737 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:53 crc kubenswrapper[4650]: I0201 07:23:53.965105 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:53 crc kubenswrapper[4650]: E0201 07:23:53.965210 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:23:53 crc kubenswrapper[4650]: E0201 07:23:53.965398 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.037461 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.037490 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.037499 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.037512 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.037523 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:54Z","lastTransitionTime":"2026-02-01T07:23:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.140121 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.140555 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.140789 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.140826 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.140844 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:54Z","lastTransitionTime":"2026-02-01T07:23:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.243683 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.243742 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.243760 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.243777 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.243790 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:54Z","lastTransitionTime":"2026-02-01T07:23:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.347802 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.348171 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.348202 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.348234 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.348258 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:54Z","lastTransitionTime":"2026-02-01T07:23:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.352005 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/0.log" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.356111 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f"} Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.357369 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.373272 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.389708 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.407181 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.451564 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.451614 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.451624 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.451645 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.451661 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:54Z","lastTransitionTime":"2026-02-01T07:23:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.454865 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.480765 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.504301 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.527229 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.548753 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.554543 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.554597 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.554612 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.554631 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.554643 4650 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:54Z","lastTransitionTime":"2026-02-01T07:23:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.565574 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.584996 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.607771 4650 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b553911431da00f9f5e6f1fe702f0b453c7bff2eb8db8984a9ffa90c000669ef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:23:52Z\\\",\\\"message\\\":\\\"dler.go:208] Removed *v1.Node event handler 7\\\\nI0201 07:23:52.486506 5836 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0201 07:23:52.486515 5836 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0201 07:23:52.486522 5836 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0201 07:23:52.486529 5836 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0201 07:23:52.486536 5836 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0201 07:23:52.486539 5836 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0201 07:23:52.486543 5836 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0201 07:23:52.486621 5836 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0201 07:23:52.486910 5836 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0201 07:23:52.487706 5836 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0201 07:23:52.488120 5836 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initConta
inerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.624989 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.641567 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.657251 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.657279 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.657287 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.657305 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.657318 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:54Z","lastTransitionTime":"2026-02-01T07:23:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.658090 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:54Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.760376 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.760411 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.760421 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.760445 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.760457 4650 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:54Z","lastTransitionTime":"2026-02-01T07:23:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.862886 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.862966 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.862983 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.863054 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.863073 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:54Z","lastTransitionTime":"2026-02-01T07:23:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.899739 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 10:30:55.668787636 +0000 UTC Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.964238 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:54 crc kubenswrapper[4650]: E0201 07:23:54.964416 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.966263 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.966328 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.966347 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.966376 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:54 crc kubenswrapper[4650]: I0201 07:23:54.966422 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:54Z","lastTransitionTime":"2026-02-01T07:23:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.079692 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.079758 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.079778 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.079805 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.079827 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:55Z","lastTransitionTime":"2026-02-01T07:23:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.183661 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.184136 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.184274 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.184432 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.184588 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:55Z","lastTransitionTime":"2026-02-01T07:23:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.287902 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.288259 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.288394 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.288520 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.288685 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:55Z","lastTransitionTime":"2026-02-01T07:23:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.362710 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/1.log" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.364126 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/0.log" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.368471 4650 generic.go:334] "Generic (PLEG): container finished" podID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerID="edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f" exitCode=1 Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.368688 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f"} Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.368939 4650 scope.go:117] "RemoveContainer" containerID="b553911431da00f9f5e6f1fe702f0b453c7bff2eb8db8984a9ffa90c000669ef" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.369913 4650 scope.go:117] "RemoveContainer" containerID="edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f" Feb 01 07:23:55 crc kubenswrapper[4650]: E0201 07:23:55.370297 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.391634 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.391699 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.391751 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.391816 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.391889 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:55Z","lastTransitionTime":"2026-02-01T07:23:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.392800 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.410264 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.431019 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.462366 4650 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b553911431da00f9f5e6f1fe702f0b453c7bff2eb8db8984a9ffa90c000669ef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:23:52Z\\\",\\\"message\\\":\\\"dler.go:208] Removed *v1.Node event handler 7\\\\nI0201 07:23:52.486506 5836 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0201 07:23:52.486515 5836 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0201 07:23:52.486522 5836 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0201 07:23:52.486529 5836 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0201 07:23:52.486536 5836 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0201 07:23:52.486539 5836 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0201 07:23:52.486543 5836 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0201 07:23:52.486621 5836 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0201 07:23:52.486910 5836 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0201 07:23:52.487706 5836 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0201 07:23:52.488120 5836 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:23:54Z\\\",\\\"message\\\":\\\"o:134] Ensuring zone local for Pod 
openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI0201 07:23:54.418424 5987 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-xfq9r in node crc\\\\nI0201 07:23:54.418991 5987 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-xfq9r after 0 failed attempt(s)\\\\nI0201 07:23:54.418963 5987 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:23:54.419160 5987 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab
1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.486234 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.495610 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.495689 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.495708 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.495735 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.495754 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:55Z","lastTransitionTime":"2026-02-01T07:23:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.512473 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.530417 4650 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.553926 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.577825 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.600074 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.600124 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.600169 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.600200 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.600220 4650 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:55Z","lastTransitionTime":"2026-02-01T07:23:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.604348 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.628958 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.653270 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.674751 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.698378 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:55Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.704085 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.704313 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.704464 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.704609 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.704786 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:55Z","lastTransitionTime":"2026-02-01T07:23:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.808085 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.808151 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.808169 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.808195 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.808217 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:55Z","lastTransitionTime":"2026-02-01T07:23:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.900761 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 22:04:59.577365061 +0000 UTC Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.911743 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.911804 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.911823 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.911848 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.911867 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:55Z","lastTransitionTime":"2026-02-01T07:23:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.964864 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:55 crc kubenswrapper[4650]: I0201 07:23:55.964899 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:55 crc kubenswrapper[4650]: E0201 07:23:55.965119 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:23:55 crc kubenswrapper[4650]: E0201 07:23:55.965251 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.015539 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.015599 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.015620 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.015646 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.015666 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:56Z","lastTransitionTime":"2026-02-01T07:23:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.119976 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.120100 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.120123 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.120155 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.120178 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:56Z","lastTransitionTime":"2026-02-01T07:23:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.223934 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.224919 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.224940 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.224964 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.224981 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:56Z","lastTransitionTime":"2026-02-01T07:23:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.328946 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.329015 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.329077 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.329111 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.329134 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:56Z","lastTransitionTime":"2026-02-01T07:23:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.375634 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/1.log" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.381089 4650 scope.go:117] "RemoveContainer" containerID="edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f" Feb 01 07:23:56 crc kubenswrapper[4650]: E0201 07:23:56.381350 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.404644 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.433823 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.433881 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.433901 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.433928 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.433948 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:56Z","lastTransitionTime":"2026-02-01T07:23:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.434660 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.450714 4650 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.471520 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.494891 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.515137 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.537861 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.537929 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.537947 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.537973 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.537993 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:56Z","lastTransitionTime":"2026-02-01T07:23:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.538014 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.558404 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.569138 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj"] Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.569778 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:56 crc kubenswrapper[4650]: W0201 07:23:56.573116 4650 reflector.go:561] object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd": failed to list *v1.Secret: secrets "ovn-kubernetes-control-plane-dockercfg-gs7dd" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Feb 01 07:23:56 crc kubenswrapper[4650]: E0201 07:23:56.573182 4650 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-control-plane-dockercfg-gs7dd\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ovn-kubernetes-control-plane-dockercfg-gs7dd\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Feb 01 07:23:56 crc kubenswrapper[4650]: W0201 07:23:56.573263 4650 reflector.go:561] object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert": failed to list *v1.Secret: secrets "ovn-control-plane-metrics-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Feb 01 07:23:56 crc kubenswrapper[4650]: E0201 07:23:56.573287 4650 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"ovn-control-plane-metrics-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ovn-control-plane-metrics-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.585081 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.609184 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.630570 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.642410 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.642488 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.642506 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.642536 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.642556 4650 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:56Z","lastTransitionTime":"2026-02-01T07:23:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.651694 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.652745 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26sdv\" (UniqueName: \"kubernetes.io/projected/987b2d65-e234-4350-9aa3-abbd99a6ca8c-kube-api-access-26sdv\") pod \"ovnkube-control-plane-749d76644c-mscbj\" (UID: \"987b2d65-e234-4350-9aa3-abbd99a6ca8c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 
07:23:56.652814 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/987b2d65-e234-4350-9aa3-abbd99a6ca8c-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-mscbj\" (UID: \"987b2d65-e234-4350-9aa3-abbd99a6ca8c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.652851 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/987b2d65-e234-4350-9aa3-abbd99a6ca8c-env-overrides\") pod \"ovnkube-control-plane-749d76644c-mscbj\" (UID: \"987b2d65-e234-4350-9aa3-abbd99a6ca8c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.652899 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/987b2d65-e234-4350-9aa3-abbd99a6ca8c-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-mscbj\" (UID: \"987b2d65-e234-4350-9aa3-abbd99a6ca8c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.669753 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.695782 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642
dfb9df4e8db2998fe326fd1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:23:54Z\\\",\\\"message\\\":\\\"o:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI0201 07:23:54.418424 5987 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-xfq9r in node crc\\\\nI0201 07:23:54.418991 5987 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-xfq9r after 0 failed attempt(s)\\\\nI0201 07:23:54.418963 5987 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:23:54.419160 5987 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.736160 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.746089 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.746152 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.746172 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.746197 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.746216 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:56Z","lastTransitionTime":"2026-02-01T07:23:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.751267 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.753777 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/987b2d65-e234-4350-9aa3-abbd99a6ca8c-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-mscbj\" (UID: \"987b2d65-e234-4350-9aa3-abbd99a6ca8c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.753917 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26sdv\" (UniqueName: \"kubernetes.io/projected/987b2d65-e234-4350-9aa3-abbd99a6ca8c-kube-api-access-26sdv\") pod \"ovnkube-control-plane-749d76644c-mscbj\" (UID: 
\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.753970 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/987b2d65-e234-4350-9aa3-abbd99a6ca8c-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-mscbj\" (UID: \"987b2d65-e234-4350-9aa3-abbd99a6ca8c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.754068 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/987b2d65-e234-4350-9aa3-abbd99a6ca8c-env-overrides\") pod \"ovnkube-control-plane-749d76644c-mscbj\" (UID: \"987b2d65-e234-4350-9aa3-abbd99a6ca8c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.755357 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/987b2d65-e234-4350-9aa3-abbd99a6ca8c-env-overrides\") pod \"ovnkube-control-plane-749d76644c-mscbj\" (UID: \"987b2d65-e234-4350-9aa3-abbd99a6ca8c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.756192 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/987b2d65-e234-4350-9aa3-abbd99a6ca8c-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-mscbj\" (UID: \"987b2d65-e234-4350-9aa3-abbd99a6ca8c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.763376 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.780571 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.789010 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26sdv\" (UniqueName: \"kubernetes.io/projected/987b2d65-e234-4350-9aa3-abbd99a6ca8c-kube-api-access-26sdv\") pod \"ovnkube-control-plane-749d76644c-mscbj\" (UID: \"987b2d65-e234-4350-9aa3-abbd99a6ca8c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.803109 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.816883 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.831913 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\
"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.844178 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.849156 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.849223 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.849237 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:56 crc 
kubenswrapper[4650]: I0201 07:23:56.849256 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.849270 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:56Z","lastTransitionTime":"2026-02-01T07:23:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.862513 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"nam
e\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.878911 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.893477 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.902171 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 01:53:25.762893174 +0000 UTC Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.908356 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.921951 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.943397 4650 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:23:54Z\\\",\\\"message\\\":\\\"o:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI0201 07:23:54.418424 5987 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-xfq9r in node crc\\\\nI0201 07:23:54.418991 5987 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-xfq9r after 0 failed attempt(s)\\\\nI0201 07:23:54.418963 5987 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:23:54.419160 5987 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.952394 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.952481 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.952506 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.952541 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.952567 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:56Z","lastTransitionTime":"2026-02-01T07:23:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.958795 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:56Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:56 crc kubenswrapper[4650]: I0201 07:23:56.965131 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:56 crc kubenswrapper[4650]: E0201 07:23:56.965334 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.055913 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.055963 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.055980 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.056004 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.056021 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:57Z","lastTransitionTime":"2026-02-01T07:23:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.158560 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.158607 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.158623 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.158646 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.158662 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:57Z","lastTransitionTime":"2026-02-01T07:23:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.261645 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.261708 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.261729 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.261757 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.261779 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:57Z","lastTransitionTime":"2026-02-01T07:23:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.365587 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.365694 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.365720 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.365756 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.365792 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:57Z","lastTransitionTime":"2026-02-01T07:23:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.397137 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.469825 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.469913 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.469931 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.469959 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.469979 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:57Z","lastTransitionTime":"2026-02-01T07:23:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.561007 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.561129 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.561399 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.561433 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.561426 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.561456 4650 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.561478 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.561504 4650 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.561552 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-01 07:24:13.561521311 +0000 UTC m=+52.284619586 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.561668 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-01 07:24:13.561566272 +0000 UTC m=+52.284664547 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.573986 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.574076 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.574102 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.574160 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.574181 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:57Z","lastTransitionTime":"2026-02-01T07:23:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.584158 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.594010 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/987b2d65-e234-4350-9aa3-abbd99a6ca8c-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-mscbj\" (UID: \"987b2d65-e234-4350-9aa3-abbd99a6ca8c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.676868 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.676919 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.676941 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.676968 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.676989 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:57Z","lastTransitionTime":"2026-02-01T07:23:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.728804 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-jvgsf"] Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.730774 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.730960 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.750206 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.763184 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.763395 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mbbv\" (UniqueName: \"kubernetes.io/projected/f4593d40-c6e1-42fa-8c18-053ff31304b3-kube-api-access-4mbbv\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.763454 4650 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.763491 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.763530 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.763663 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:24:13.763639491 +0000 UTC m=+52.486737766 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.763795 4650 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.763857 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:24:13.763843756 +0000 UTC m=+52.486942031 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.764316 4650 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.764376 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-02-01 07:24:13.76436076 +0000 UTC m=+52.487459045 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.772338 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-r
elease\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":fals
e,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.780591 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.780643 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.780659 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.780684 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.780705 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:57Z","lastTransitionTime":"2026-02-01T07:23:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.789082 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.796867 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.806129 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.834449 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.848393 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.859963 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.864808 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mbbv\" (UniqueName: \"kubernetes.io/projected/f4593d40-c6e1-42fa-8c18-053ff31304b3-kube-api-access-4mbbv\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.864886 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.865007 4650 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.865081 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs podName:f4593d40-c6e1-42fa-8c18-053ff31304b3 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:58.365065835 +0000 UTC m=+37.088164080 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs") pod "network-metrics-daemon-jvgsf" (UID: "f4593d40-c6e1-42fa-8c18-053ff31304b3") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.871643 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.882571 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.882616 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.882628 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.882645 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.882973 4650 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:57Z","lastTransitionTime":"2026-02-01T07:23:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.884921 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.890648 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mbbv\" (UniqueName: \"kubernetes.io/projected/f4593d40-c6e1-42fa-8c18-053ff31304b3-kube-api-access-4mbbv\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.896377 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.902580 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 22:10:34.315515546 +0000 UTC Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.915373 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.932726 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.944523 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.954938 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.966960 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.967365 4650 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.967428 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.967555 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:23:57 crc kubenswrapper[4650]: E0201 07:23:57.968016 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.986660 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.986688 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.986697 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.986711 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.986721 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:57Z","lastTransitionTime":"2026-02-01T07:23:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:57 crc kubenswrapper[4650]: I0201 07:23:57.993661 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:23:54Z\\\",\\\"message\\\":\\\"o:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI0201 07:23:54.418424 5987 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-xfq9r in node crc\\\\nI0201 07:23:54.418991 5987 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-xfq9r after 0 failed attempt(s)\\\\nI0201 07:23:54.418963 5987 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:23:54.419160 5987 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:57Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.089058 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.089103 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.089120 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.089142 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.089170 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:58Z","lastTransitionTime":"2026-02-01T07:23:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.191075 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.201788 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.201871 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.201893 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.201919 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.201948 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:58Z","lastTransitionTime":"2026-02-01T07:23:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.209517 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.209570 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.209583 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.209603 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.209617 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:58Z","lastTransitionTime":"2026-02-01T07:23:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.212200 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: E0201 07:23:58.231991 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.235113 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\"
:\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api
-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"nam
e\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.237014 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.237067 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.237080 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.237100 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.237115 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:58Z","lastTransitionTime":"2026-02-01T07:23:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.250396 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: E0201 07:23:58.252379 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient 
memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\
\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\
":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.257155 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.257214 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.257226 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.257246 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.257258 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:58Z","lastTransitionTime":"2026-02-01T07:23:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.267388 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: E0201 07:23:58.272146 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 
2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.285585 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.285688 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.285703 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.285720 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.285733 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:58Z","lastTransitionTime":"2026-02-01T07:23:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.300649 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.329871 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.355809 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: E0201 07:23:58.368619 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 
2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.370367 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.370704 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:23:58 crc kubenswrapper[4650]: E0201 07:23:58.370809 4650 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:23:58 crc kubenswrapper[4650]: E0201 07:23:58.371350 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs podName:f4593d40-c6e1-42fa-8c18-053ff31304b3 nodeName:}" failed. No retries permitted until 2026-02-01 07:23:59.371333445 +0000 UTC m=+38.094431690 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs") pod "network-metrics-daemon-jvgsf" (UID: "f4593d40-c6e1-42fa-8c18-053ff31304b3") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.373215 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.373241 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.373250 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.373266 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.373276 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:58Z","lastTransitionTime":"2026-02-01T07:23:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.390655 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" event={"ID":"987b2d65-e234-4350-9aa3-abbd99a6ca8c","Type":"ContainerStarted","Data":"39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.390716 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" event={"ID":"987b2d65-e234-4350-9aa3-abbd99a6ca8c","Type":"ContainerStarted","Data":"7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.390730 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" event={"ID":"987b2d65-e234-4350-9aa3-abbd99a6ca8c","Type":"ContainerStarted","Data":"34b11f6151e06b0539b366c3e8f34a08f22422c69cfcb8d776b809af11b7d884"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.395529 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: E0201 07:23:58.405441 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae66
9\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-rel
ease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: E0201 07:23:58.405558 4650 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.407312 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.407340 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.407349 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.407368 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.407379 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:58Z","lastTransitionTime":"2026-02-01T07:23:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.419356 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.435117 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.451123 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.465063 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.474533 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.485960 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.506558 4650 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:23:54Z\\\",\\\"message\\\":\\\"o:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI0201 07:23:54.418424 5987 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-xfq9r in node crc\\\\nI0201 07:23:54.418991 5987 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-xfq9r after 0 failed attempt(s)\\\\nI0201 07:23:54.418963 5987 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:23:54.419160 5987 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.510576 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.510615 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.510627 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.510645 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.510659 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:58Z","lastTransitionTime":"2026-02-01T07:23:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.522263 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.539139 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.556454 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.571286 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 
07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.581237 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.599677 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.613578 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.613791 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.613939 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.614136 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.614285 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:58Z","lastTransitionTime":"2026-02-01T07:23:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.617517 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.638266 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.654918 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.676062 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.693669 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.712923 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.724908 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.724972 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.724992 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.725019 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.725079 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:58Z","lastTransitionTime":"2026-02-01T07:23:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.737253 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.754949 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.770107 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.800681 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:23:54Z\\\",\\\"message\\\":\\\"o:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI0201 07:23:54.418424 5987 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-xfq9r in node crc\\\\nI0201 07:23:54.418991 5987 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-xfq9r after 0 failed attempt(s)\\\\nI0201 07:23:54.418963 5987 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:23:54.419160 5987 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:23:58Z is after 2025-08-24T17:21:41Z" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.827958 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.828089 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.828120 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.828156 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.828185 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:58Z","lastTransitionTime":"2026-02-01T07:23:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.902994 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 22:03:23.053458194 +0000 UTC Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.932671 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.932730 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.932752 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.932776 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.932795 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:58Z","lastTransitionTime":"2026-02-01T07:23:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:58 crc kubenswrapper[4650]: I0201 07:23:58.964352 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:23:58 crc kubenswrapper[4650]: E0201 07:23:58.964602 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.035595 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.035718 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.035745 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.035777 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.035803 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:59Z","lastTransitionTime":"2026-02-01T07:23:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.141446 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.141880 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.142140 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.142352 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.142551 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:59Z","lastTransitionTime":"2026-02-01T07:23:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.246444 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.246522 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.246549 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.246581 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.246603 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:59Z","lastTransitionTime":"2026-02-01T07:23:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.349757 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.349811 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.349830 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.349854 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.349872 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:59Z","lastTransitionTime":"2026-02-01T07:23:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.380570 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:23:59 crc kubenswrapper[4650]: E0201 07:23:59.380800 4650 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:23:59 crc kubenswrapper[4650]: E0201 07:23:59.380900 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs podName:f4593d40-c6e1-42fa-8c18-053ff31304b3 nodeName:}" failed. No retries permitted until 2026-02-01 07:24:01.380876309 +0000 UTC m=+40.103974594 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs") pod "network-metrics-daemon-jvgsf" (UID: "f4593d40-c6e1-42fa-8c18-053ff31304b3") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.454083 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.454159 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.454182 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.454208 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.454226 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:59Z","lastTransitionTime":"2026-02-01T07:23:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.558604 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.558670 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.558691 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.558718 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.558737 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:59Z","lastTransitionTime":"2026-02-01T07:23:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.661509 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.661685 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.661765 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.661855 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.661936 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:59Z","lastTransitionTime":"2026-02-01T07:23:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.764350 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.764413 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.764431 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.764457 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.764474 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:59Z","lastTransitionTime":"2026-02-01T07:23:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.867116 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.867186 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.867211 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.867240 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.867287 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:59Z","lastTransitionTime":"2026-02-01T07:23:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.903899 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 14:40:21.478849238 +0000 UTC Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.964884 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:23:59 crc kubenswrapper[4650]: E0201 07:23:59.965105 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.965318 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:23:59 crc kubenswrapper[4650]: E0201 07:23:59.965516 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.965344 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:23:59 crc kubenswrapper[4650]: E0201 07:23:59.965684 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.971258 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.971300 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.971317 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.971340 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:23:59 crc kubenswrapper[4650]: I0201 07:23:59.971360 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:23:59Z","lastTransitionTime":"2026-02-01T07:23:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.074813 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.074864 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.074882 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.074910 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.074926 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:00Z","lastTransitionTime":"2026-02-01T07:24:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.178265 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.178325 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.178342 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.178368 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.178386 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:00Z","lastTransitionTime":"2026-02-01T07:24:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.281716 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.281766 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.281787 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.281811 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.281828 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:00Z","lastTransitionTime":"2026-02-01T07:24:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.385946 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.386071 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.386103 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.386128 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.386145 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:00Z","lastTransitionTime":"2026-02-01T07:24:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.488942 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.489009 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.489060 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.489087 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.489106 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:00Z","lastTransitionTime":"2026-02-01T07:24:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.592617 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.592676 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.592692 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.592717 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.592735 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:00Z","lastTransitionTime":"2026-02-01T07:24:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.695853 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.695913 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.695931 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.695960 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.695978 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:00Z","lastTransitionTime":"2026-02-01T07:24:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.799464 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.799530 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.799546 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.799571 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.799590 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:00Z","lastTransitionTime":"2026-02-01T07:24:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.902605 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.903785 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.904014 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.905327 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.905373 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:00Z","lastTransitionTime":"2026-02-01T07:24:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.905492 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 16:45:52.502069236 +0000 UTC Feb 01 07:24:00 crc kubenswrapper[4650]: I0201 07:24:00.964655 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:00 crc kubenswrapper[4650]: E0201 07:24:00.965186 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.008413 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.008508 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.008562 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.008587 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.008659 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:01Z","lastTransitionTime":"2026-02-01T07:24:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.112408 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.112471 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.112488 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.112511 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.112528 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:01Z","lastTransitionTime":"2026-02-01T07:24:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.221613 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.221681 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.221698 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.221723 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.221741 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:01Z","lastTransitionTime":"2026-02-01T07:24:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.324819 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.324882 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.324903 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.324931 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.324950 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:01Z","lastTransitionTime":"2026-02-01T07:24:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.404688 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:01 crc kubenswrapper[4650]: E0201 07:24:01.404987 4650 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:24:01 crc kubenswrapper[4650]: E0201 07:24:01.405140 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs podName:f4593d40-c6e1-42fa-8c18-053ff31304b3 nodeName:}" failed. No retries permitted until 2026-02-01 07:24:05.405109697 +0000 UTC m=+44.128207982 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs") pod "network-metrics-daemon-jvgsf" (UID: "f4593d40-c6e1-42fa-8c18-053ff31304b3") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.428175 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.428243 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.428265 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.428293 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.428314 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:01Z","lastTransitionTime":"2026-02-01T07:24:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.532633 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.532688 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.532705 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.532729 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.532750 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:01Z","lastTransitionTime":"2026-02-01T07:24:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.635491 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.635552 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.635573 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.635599 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.635619 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:01Z","lastTransitionTime":"2026-02-01T07:24:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.739021 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.739107 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.739126 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.739152 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.739169 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:01Z","lastTransitionTime":"2026-02-01T07:24:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.842973 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.843079 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.843099 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.843125 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.843143 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:01Z","lastTransitionTime":"2026-02-01T07:24:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.906526 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 17:18:48.356536301 +0000 UTC Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.946237 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.946304 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.946324 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.946352 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.946370 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:01Z","lastTransitionTime":"2026-02-01T07:24:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.964624 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.964706 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.964625 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:01 crc kubenswrapper[4650]: E0201 07:24:01.964962 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:01 crc kubenswrapper[4650]: E0201 07:24:01.965195 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:01 crc kubenswrapper[4650]: E0201 07:24:01.964837 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:01 crc kubenswrapper[4650]: I0201 07:24:01.990330 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:01Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.012222 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.040547 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.049777 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.049842 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.049864 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.049890 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.049909 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:02Z","lastTransitionTime":"2026-02-01T07:24:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.065576 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.083847 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sh
a256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.103584 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.118802 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.138004 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.153542 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.153597 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.153609 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.153628 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.153642 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:02Z","lastTransitionTime":"2026-02-01T07:24:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.164758 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:23:54Z\\\",\\\"message\\\":\\\"o:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI0201 07:23:54.418424 5987 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-xfq9r in node crc\\\\nI0201 07:23:54.418991 5987 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-xfq9r after 0 failed attempt(s)\\\\nI0201 07:23:54.418963 5987 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:23:54.419160 5987 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.184318 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.197270 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.210816 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.223744 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 
07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.233781 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.244591 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.255805 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.255848 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.255863 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.255883 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.255896 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:02Z","lastTransitionTime":"2026-02-01T07:24:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.271195 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:02Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.359233 4650 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.359286 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.359298 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.359320 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.359333 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:02Z","lastTransitionTime":"2026-02-01T07:24:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.462086 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.462129 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.462142 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.462161 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.462173 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:02Z","lastTransitionTime":"2026-02-01T07:24:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.565057 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.565423 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.565565 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.565703 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.565825 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:02Z","lastTransitionTime":"2026-02-01T07:24:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.669174 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.669240 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.669257 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.669284 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.669306 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:02Z","lastTransitionTime":"2026-02-01T07:24:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.772889 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.773254 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.773436 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.773634 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.773820 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:02Z","lastTransitionTime":"2026-02-01T07:24:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.876905 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.877367 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.877513 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.877680 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.877816 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:02Z","lastTransitionTime":"2026-02-01T07:24:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.907397 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 05:09:15.351231449 +0000 UTC Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.964450 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:02 crc kubenswrapper[4650]: E0201 07:24:02.965441 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.980795 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.980913 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.980938 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.980973 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:02 crc kubenswrapper[4650]: I0201 07:24:02.980995 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:02Z","lastTransitionTime":"2026-02-01T07:24:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.085187 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.085260 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.085277 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.085301 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.085318 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:03Z","lastTransitionTime":"2026-02-01T07:24:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.188524 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.188650 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.188676 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.188702 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.188722 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:03Z","lastTransitionTime":"2026-02-01T07:24:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.292290 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.292355 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.292371 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.292386 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.292395 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:03Z","lastTransitionTime":"2026-02-01T07:24:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.394637 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.394726 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.394752 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.394783 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.394809 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:03Z","lastTransitionTime":"2026-02-01T07:24:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.498258 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.498333 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.498351 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.498378 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.498397 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:03Z","lastTransitionTime":"2026-02-01T07:24:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.602451 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.602524 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.602544 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.602572 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.602596 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:03Z","lastTransitionTime":"2026-02-01T07:24:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.705854 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.705909 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.705926 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.705947 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.705964 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:03Z","lastTransitionTime":"2026-02-01T07:24:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.809927 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.809983 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.810001 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.810172 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.810206 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:03Z","lastTransitionTime":"2026-02-01T07:24:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.908537 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 08:30:09.504425201 +0000 UTC Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.917358 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.917414 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.917433 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.917458 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.917476 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:03Z","lastTransitionTime":"2026-02-01T07:24:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.965103 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:03 crc kubenswrapper[4650]: E0201 07:24:03.965332 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.965354 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:03 crc kubenswrapper[4650]: I0201 07:24:03.965355 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:03 crc kubenswrapper[4650]: E0201 07:24:03.965531 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:03 crc kubenswrapper[4650]: E0201 07:24:03.965942 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.020949 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.021127 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.021150 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.021180 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.021201 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:04Z","lastTransitionTime":"2026-02-01T07:24:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.125059 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.125131 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.125154 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.125181 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.125202 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:04Z","lastTransitionTime":"2026-02-01T07:24:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.228939 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.229006 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.229060 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.229091 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.229116 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:04Z","lastTransitionTime":"2026-02-01T07:24:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.332400 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.332519 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.332544 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.332583 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.332606 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:04Z","lastTransitionTime":"2026-02-01T07:24:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.434987 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.435097 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.435123 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.435153 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.435177 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:04Z","lastTransitionTime":"2026-02-01T07:24:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.538643 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.538732 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.538750 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.538775 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.538793 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:04Z","lastTransitionTime":"2026-02-01T07:24:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.642705 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.642788 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.642815 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.642881 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.642905 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:04Z","lastTransitionTime":"2026-02-01T07:24:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.746293 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.746436 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.746465 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.746499 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.746529 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:04Z","lastTransitionTime":"2026-02-01T07:24:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.849662 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.849792 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.849818 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.849851 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.849876 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:04Z","lastTransitionTime":"2026-02-01T07:24:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.909591 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 02:40:57.091836056 +0000 UTC Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.953301 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.953415 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.953436 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.953511 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.953545 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:04Z","lastTransitionTime":"2026-02-01T07:24:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:04 crc kubenswrapper[4650]: I0201 07:24:04.965102 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:04 crc kubenswrapper[4650]: E0201 07:24:04.965307 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.056477 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.056532 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.056548 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.056574 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.056590 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:05Z","lastTransitionTime":"2026-02-01T07:24:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.159520 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.159578 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.159596 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.159620 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.159638 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:05Z","lastTransitionTime":"2026-02-01T07:24:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.262997 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.263138 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.263162 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.263193 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.263218 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:05Z","lastTransitionTime":"2026-02-01T07:24:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.366383 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.366448 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.366465 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.366489 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.366508 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:05Z","lastTransitionTime":"2026-02-01T07:24:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.458431 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:05 crc kubenswrapper[4650]: E0201 07:24:05.458625 4650 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:24:05 crc kubenswrapper[4650]: E0201 07:24:05.458701 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs podName:f4593d40-c6e1-42fa-8c18-053ff31304b3 nodeName:}" failed. No retries permitted until 2026-02-01 07:24:13.458676765 +0000 UTC m=+52.181775040 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs") pod "network-metrics-daemon-jvgsf" (UID: "f4593d40-c6e1-42fa-8c18-053ff31304b3") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.469640 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.469687 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.469705 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.469769 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.469830 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:05Z","lastTransitionTime":"2026-02-01T07:24:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.573330 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.573465 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.573491 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.573516 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.573537 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:05Z","lastTransitionTime":"2026-02-01T07:24:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.677544 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.677626 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.677651 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.677690 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.677716 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:05Z","lastTransitionTime":"2026-02-01T07:24:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.780102 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.780157 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.780174 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.780199 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.780216 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:05Z","lastTransitionTime":"2026-02-01T07:24:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.883088 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.883154 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.883177 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.883205 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.883223 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:05Z","lastTransitionTime":"2026-02-01T07:24:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.910864 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 18:33:18.923884122 +0000 UTC Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.964640 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.964720 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.964738 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:05 crc kubenswrapper[4650]: E0201 07:24:05.964854 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:05 crc kubenswrapper[4650]: E0201 07:24:05.964995 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:05 crc kubenswrapper[4650]: E0201 07:24:05.965205 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.986877 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.986960 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.986981 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.987427 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:05 crc kubenswrapper[4650]: I0201 07:24:05.987650 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:05Z","lastTransitionTime":"2026-02-01T07:24:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.091744 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.091806 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.091823 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.091847 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.091866 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:06Z","lastTransitionTime":"2026-02-01T07:24:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.195798 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.195875 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.195897 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.195937 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.195960 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:06Z","lastTransitionTime":"2026-02-01T07:24:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.299601 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.299678 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.299698 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.299724 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.299742 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:06Z","lastTransitionTime":"2026-02-01T07:24:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.402619 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.402714 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.402767 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.402790 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.402807 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:06Z","lastTransitionTime":"2026-02-01T07:24:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.506069 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.506129 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.506146 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.506169 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.506187 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:06Z","lastTransitionTime":"2026-02-01T07:24:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.609613 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.609677 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.609693 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.609723 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.609745 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:06Z","lastTransitionTime":"2026-02-01T07:24:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.713979 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.714069 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.714088 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.714116 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.714134 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:06Z","lastTransitionTime":"2026-02-01T07:24:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.816888 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.816986 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.817004 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.817066 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.817090 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:06Z","lastTransitionTime":"2026-02-01T07:24:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.911427 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 01:12:20.081719895 +0000 UTC Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.919831 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.919888 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.919904 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.919927 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.919944 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:06Z","lastTransitionTime":"2026-02-01T07:24:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.964490 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:06 crc kubenswrapper[4650]: E0201 07:24:06.965122 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:06 crc kubenswrapper[4650]: I0201 07:24:06.966274 4650 scope.go:117] "RemoveContainer" containerID="edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.022441 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.022488 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.022504 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.022527 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.022544 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:07Z","lastTransitionTime":"2026-02-01T07:24:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.125610 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.125671 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.125687 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.125712 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.125731 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:07Z","lastTransitionTime":"2026-02-01T07:24:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.228806 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.228832 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.228841 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.228855 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.228863 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:07Z","lastTransitionTime":"2026-02-01T07:24:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.331968 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.332007 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.332078 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.332102 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.332116 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:07Z","lastTransitionTime":"2026-02-01T07:24:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.428904 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/1.log" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.436361 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.436426 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.436443 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.436469 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.436488 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:07Z","lastTransitionTime":"2026-02-01T07:24:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.437961 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7"} Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.438995 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.462501 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.483767 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.506468 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.530101 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.538843 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.538913 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.538925 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.538944 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.538956 4650 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:07Z","lastTransitionTime":"2026-02-01T07:24:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.545552 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.564389 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.580612 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.602477 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a
5c5e26a7e1a782a6aec531c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:23:54Z\\\",\\\"message\\\":\\\"o:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI0201 07:23:54.418424 5987 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-xfq9r in node crc\\\\nI0201 07:23:54.418991 5987 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-xfq9r after 0 failed attempt(s)\\\\nI0201 07:23:54.418963 5987 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:23:54.419160 5987 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:24:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.618348 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.630590 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.640927 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.640960 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.640971 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.640986 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.640995 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:07Z","lastTransitionTime":"2026-02-01T07:24:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.644927 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"start
Time\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.661656 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.1
1\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.676338 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.696936 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.714583 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.730317 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:07Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.743297 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.743342 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.743352 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.743371 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.743381 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:07Z","lastTransitionTime":"2026-02-01T07:24:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.847532 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.847602 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.847619 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.847646 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.847664 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:07Z","lastTransitionTime":"2026-02-01T07:24:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.911934 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 01:37:42.386172341 +0000 UTC Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.950462 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.950964 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.950990 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.951058 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.951088 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:07Z","lastTransitionTime":"2026-02-01T07:24:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.965308 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.965341 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:07 crc kubenswrapper[4650]: I0201 07:24:07.965507 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:07 crc kubenswrapper[4650]: E0201 07:24:07.965490 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:07 crc kubenswrapper[4650]: E0201 07:24:07.965680 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:07 crc kubenswrapper[4650]: E0201 07:24:07.965835 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.054184 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.054270 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.054287 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.054311 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.054329 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.157281 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.157337 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.157355 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.157378 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.157397 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.259979 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.260067 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.260085 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.260110 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.260161 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.363110 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.363169 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.363185 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.363211 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.363228 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.445009 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/2.log" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.445993 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/1.log" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.450969 4650 generic.go:334] "Generic (PLEG): container finished" podID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerID="ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7" exitCode=1 Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.451050 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7"} Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.451137 4650 scope.go:117] "RemoveContainer" containerID="edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.452020 4650 scope.go:117] "RemoveContainer" containerID="ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7" Feb 01 07:24:08 crc kubenswrapper[4650]: E0201 07:24:08.452307 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.466094 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.466140 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.466158 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.466181 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.466211 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.479400 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.499874 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.519667 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.539560 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.560093 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.571105 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.571167 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.571189 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.571215 
4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.571232 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.582409 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.613348 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.634638 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.651263 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.666336 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.674405 4650 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.674591 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.674758 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.674904 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.675123 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.689586 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a
5c5e26a7e1a782a6aec531c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edf33771b0415c8446e9220251d1471020be7642dfb9df4e8db2998fe326fd1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:23:54Z\\\",\\\"message\\\":\\\"o:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI0201 07:23:54.418424 5987 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-xfq9r in node crc\\\\nI0201 07:23:54.418991 5987 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-xfq9r after 0 failed attempt(s)\\\\nI0201 07:23:54.418963 5987 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:23:54.419160 5987 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:07Z\\\",\\\"message\\\":\\\"vn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0201 07:24:07.954894 6189 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column 
_uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:24:07.955019 6189 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:24:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.701271 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.717632 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.731373 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.743929 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.755748 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" 
Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.778392 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.778430 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.778442 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.778460 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.778472 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.793737 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.793799 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.793817 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.793842 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.793861 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: E0201 07:24:08.812519 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.816706 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.816728 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.816741 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.816755 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.816767 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: E0201 07:24:08.834447 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.837895 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.837971 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.837991 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.838017 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.838060 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: E0201 07:24:08.857188 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.862261 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.862363 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.862383 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.862409 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.862428 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: E0201 07:24:08.883438 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.889066 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.889136 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.889193 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.889224 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.889248 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: E0201 07:24:08.910897 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:08Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:08 crc kubenswrapper[4650]: E0201 07:24:08.911188 4650 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.912132 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 
05:53:03 +0000 UTC, rotation deadline is 2025-11-08 11:34:47.53726587 +0000 UTC Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.913692 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.913748 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.913766 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.913788 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.913806 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:08Z","lastTransitionTime":"2026-02-01T07:24:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:08 crc kubenswrapper[4650]: I0201 07:24:08.965132 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:08 crc kubenswrapper[4650]: E0201 07:24:08.965381 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.017157 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.017224 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.017242 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.017265 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.017283 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:09Z","lastTransitionTime":"2026-02-01T07:24:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.120129 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.120194 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.120212 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.120235 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.120252 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:09Z","lastTransitionTime":"2026-02-01T07:24:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.223334 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.223406 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.223424 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.223450 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.223469 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:09Z","lastTransitionTime":"2026-02-01T07:24:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.326600 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.326653 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.326671 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.326696 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.326716 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:09Z","lastTransitionTime":"2026-02-01T07:24:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.429927 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.429990 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.430067 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.430103 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.430127 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:09Z","lastTransitionTime":"2026-02-01T07:24:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.465484 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/2.log" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.471626 4650 scope.go:117] "RemoveContainer" containerID="ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7" Feb 01 07:24:09 crc kubenswrapper[4650]: E0201 07:24:09.473301 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.491223 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 
07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.508883 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.526647 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.533420 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.533477 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.533501 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.533531 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.533556 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:09Z","lastTransitionTime":"2026-02-01T07:24:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.543355 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.558662 4650 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.574084 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.592212 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.613724 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.636153 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.636285 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.636342 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.636368 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.636399 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.636423 4650 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:09Z","lastTransitionTime":"2026-02-01T07:24:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.654743 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.672672 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.686161 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.707863 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a
5c5e26a7e1a782a6aec531c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:07Z\\\",\\\"message\\\":\\\"vn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0201 07:24:07.954894 6189 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:24:07.955019 6189 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:24:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.723936 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.738938 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.739016 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.739074 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.739107 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.739131 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:09Z","lastTransitionTime":"2026-02-01T07:24:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.739462 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.756992 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:09Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.841780 4650 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.841845 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.841865 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.841893 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.841910 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:09Z","lastTransitionTime":"2026-02-01T07:24:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.912680 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 09:29:46.488269965 +0000 UTC Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.945309 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.945377 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.945394 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.945424 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.945442 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:09Z","lastTransitionTime":"2026-02-01T07:24:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.964712 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.964784 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:09 crc kubenswrapper[4650]: E0201 07:24:09.964885 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:09 crc kubenswrapper[4650]: I0201 07:24:09.965075 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:09 crc kubenswrapper[4650]: E0201 07:24:09.965281 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:09 crc kubenswrapper[4650]: E0201 07:24:09.965440 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.048670 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.048728 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.048744 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.048767 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.048784 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:10Z","lastTransitionTime":"2026-02-01T07:24:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.152539 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.152935 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.153135 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.153316 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.153478 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:10Z","lastTransitionTime":"2026-02-01T07:24:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.256429 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.256465 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.256477 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.256493 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.256504 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:10Z","lastTransitionTime":"2026-02-01T07:24:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.359350 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.359407 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.359423 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.359446 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.359464 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:10Z","lastTransitionTime":"2026-02-01T07:24:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.462331 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.462652 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.462783 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.462913 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.463129 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:10Z","lastTransitionTime":"2026-02-01T07:24:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.566642 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.566710 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.566734 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.566762 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.566785 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:10Z","lastTransitionTime":"2026-02-01T07:24:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.669914 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.669995 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.670018 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.670081 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.670105 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:10Z","lastTransitionTime":"2026-02-01T07:24:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.773064 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.773143 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.773160 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.773186 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.773202 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:10Z","lastTransitionTime":"2026-02-01T07:24:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.885537 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.885617 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.885640 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.885668 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.885689 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:10Z","lastTransitionTime":"2026-02-01T07:24:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.914200 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 17:42:56.048941206 +0000 UTC Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.964993 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:10 crc kubenswrapper[4650]: E0201 07:24:10.965400 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.988250 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.988341 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.988364 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.988391 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:10 crc kubenswrapper[4650]: I0201 07:24:10.988413 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:10Z","lastTransitionTime":"2026-02-01T07:24:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.091842 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.091916 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.091934 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.091959 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.091977 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:11Z","lastTransitionTime":"2026-02-01T07:24:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.195696 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.195780 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.195802 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.195833 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.195855 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:11Z","lastTransitionTime":"2026-02-01T07:24:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.299518 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.299579 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.299600 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.299626 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.299644 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:11Z","lastTransitionTime":"2026-02-01T07:24:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.402804 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.402854 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.402897 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.402924 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.402941 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:11Z","lastTransitionTime":"2026-02-01T07:24:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.506401 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.506787 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.506997 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.507242 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.507429 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:11Z","lastTransitionTime":"2026-02-01T07:24:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.611953 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.612076 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.612100 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.612131 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.612156 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:11Z","lastTransitionTime":"2026-02-01T07:24:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.716009 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.716165 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.716189 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.716217 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.716320 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:11Z","lastTransitionTime":"2026-02-01T07:24:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.819498 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.819892 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.820104 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.820258 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.820383 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:11Z","lastTransitionTime":"2026-02-01T07:24:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.914762 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 14:12:16.725807183 +0000 UTC Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.923537 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.923788 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.923953 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.924181 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.924374 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:11Z","lastTransitionTime":"2026-02-01T07:24:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.964558 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:11 crc kubenswrapper[4650]: E0201 07:24:11.964777 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.965149 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:11 crc kubenswrapper[4650]: E0201 07:24:11.965459 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.965585 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:11 crc kubenswrapper[4650]: E0201 07:24:11.965961 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:11 crc kubenswrapper[4650]: I0201 07:24:11.984134 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:11Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.004927 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.021978 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.027436 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.027772 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.028127 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.028530 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.028704 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:12Z","lastTransitionTime":"2026-02-01T07:24:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.046134 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.066760 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.088327 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.108810 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.131449 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.133386 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.133467 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.133493 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.133525 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.133548 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:12Z","lastTransitionTime":"2026-02-01T07:24:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.151100 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.169834 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.202273 4650 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:07Z\\\",\\\"message\\\":\\\"vn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0201 07:24:07.954894 6189 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:24:07.955019 6189 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:24:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.222428 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.236118 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.236176 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.236228 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.236253 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.236270 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:12Z","lastTransitionTime":"2026-02-01T07:24:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.247252 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.263734 4650 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.281806 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 
07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.299700 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:12Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.341245 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.341322 4650 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.341341 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.341848 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.341914 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:12Z","lastTransitionTime":"2026-02-01T07:24:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.446010 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.446127 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.446147 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.446181 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.446203 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:12Z","lastTransitionTime":"2026-02-01T07:24:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.549803 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.549882 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.549903 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.549935 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.549955 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:12Z","lastTransitionTime":"2026-02-01T07:24:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.653565 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.653635 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.653652 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.653683 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.653703 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:12Z","lastTransitionTime":"2026-02-01T07:24:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.757171 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.757558 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.757711 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.757878 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.758099 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:12Z","lastTransitionTime":"2026-02-01T07:24:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.861772 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.861867 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.861892 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.861925 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.861944 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:12Z","lastTransitionTime":"2026-02-01T07:24:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.916513 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 22:53:08.217556197 +0000 UTC Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.964755 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:12 crc kubenswrapper[4650]: E0201 07:24:12.965171 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.965822 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.965859 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.965870 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.965887 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:12 crc kubenswrapper[4650]: I0201 07:24:12.965900 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:12Z","lastTransitionTime":"2026-02-01T07:24:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.070786 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.070848 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.070865 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.070890 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.070957 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:13Z","lastTransitionTime":"2026-02-01T07:24:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.173819 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.173905 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.173927 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.173958 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.173986 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:13Z","lastTransitionTime":"2026-02-01T07:24:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.278550 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.279185 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.279438 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.279676 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.279945 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:13Z","lastTransitionTime":"2026-02-01T07:24:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.384104 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.384172 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.384195 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.384225 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.384248 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:13Z","lastTransitionTime":"2026-02-01T07:24:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.487146 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.487202 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.487226 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.487255 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.487277 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:13Z","lastTransitionTime":"2026-02-01T07:24:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.555461 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.555923 4650 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.556145 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs podName:f4593d40-c6e1-42fa-8c18-053ff31304b3 nodeName:}" failed. No retries permitted until 2026-02-01 07:24:29.556105503 +0000 UTC m=+68.279203778 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs") pod "network-metrics-daemon-jvgsf" (UID: "f4593d40-c6e1-42fa-8c18-053ff31304b3") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.590342 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.590391 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.590411 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.590438 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.590458 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:13Z","lastTransitionTime":"2026-02-01T07:24:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.656987 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.657091 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.657273 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.657300 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.657295 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.657368 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.657389 4650 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.657321 4650 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.657473 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-01 07:24:45.657446655 +0000 UTC m=+84.380544940 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.657568 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-01 07:24:45.657544227 +0000 UTC m=+84.380642512 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.693807 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.693871 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.693888 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.693913 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.693932 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:13Z","lastTransitionTime":"2026-02-01T07:24:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.797575 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.797638 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.797654 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.797680 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.797699 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:13Z","lastTransitionTime":"2026-02-01T07:24:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.859539 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.859739 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.859772 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.859838 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:24:45.859798741 +0000 UTC m=+84.582897016 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.859873 4650 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.859936 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:24:45.859918894 +0000 UTC m=+84.583017229 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.860061 4650 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.860210 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:24:45.860183121 +0000 UTC m=+84.583281406 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.901107 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.901159 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.901182 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.901210 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.901232 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:13Z","lastTransitionTime":"2026-02-01T07:24:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.917128 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 12:34:43.51151822 +0000 UTC Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.964814 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.964825 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:13 crc kubenswrapper[4650]: I0201 07:24:13.964954 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.965134 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.965287 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:13 crc kubenswrapper[4650]: E0201 07:24:13.965439 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.005338 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.005410 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.005452 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.005480 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.005505 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:14Z","lastTransitionTime":"2026-02-01T07:24:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.109571 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.109629 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.109652 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.109680 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.109701 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:14Z","lastTransitionTime":"2026-02-01T07:24:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.212936 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.212971 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.212981 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.212995 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.213007 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:14Z","lastTransitionTime":"2026-02-01T07:24:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.316637 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.316729 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.316760 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.316793 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.316817 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:14Z","lastTransitionTime":"2026-02-01T07:24:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.420710 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.420770 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.420787 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.420811 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.420825 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:14Z","lastTransitionTime":"2026-02-01T07:24:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.523685 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.523764 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.523787 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.523817 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.523841 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:14Z","lastTransitionTime":"2026-02-01T07:24:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.629883 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.629947 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.629973 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.630003 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.630062 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:14Z","lastTransitionTime":"2026-02-01T07:24:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.739902 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.739960 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.739982 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.740011 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.740080 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:14Z","lastTransitionTime":"2026-02-01T07:24:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.843181 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.843240 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.843257 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.843279 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.843296 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:14Z","lastTransitionTime":"2026-02-01T07:24:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.917972 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 01:30:53.342676054 +0000 UTC Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.946168 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.946208 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.946224 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.946247 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.946263 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:14Z","lastTransitionTime":"2026-02-01T07:24:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:14 crc kubenswrapper[4650]: I0201 07:24:14.964214 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:14 crc kubenswrapper[4650]: E0201 07:24:14.964360 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.050078 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.050174 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.050193 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.050220 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.050241 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:15Z","lastTransitionTime":"2026-02-01T07:24:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.153377 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.153434 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.153450 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.153472 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.153490 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:15Z","lastTransitionTime":"2026-02-01T07:24:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.256844 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.256951 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.256969 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.256999 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.257018 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:15Z","lastTransitionTime":"2026-02-01T07:24:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.360365 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.360432 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.360456 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.360487 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.360510 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:15Z","lastTransitionTime":"2026-02-01T07:24:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.463347 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.463416 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.463433 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.463825 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.463881 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:15Z","lastTransitionTime":"2026-02-01T07:24:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.569760 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.569820 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.569841 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.569869 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.569893 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:15Z","lastTransitionTime":"2026-02-01T07:24:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.672862 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.672908 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.672925 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.672947 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.672964 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:15Z","lastTransitionTime":"2026-02-01T07:24:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.778602 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.779238 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.779262 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.779287 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.779305 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:15Z","lastTransitionTime":"2026-02-01T07:24:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.882334 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.882419 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.882444 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.882475 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.882498 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:15Z","lastTransitionTime":"2026-02-01T07:24:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.919209 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 21:24:36.171957425 +0000 UTC Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.964917 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.964986 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.965002 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:15 crc kubenswrapper[4650]: E0201 07:24:15.965155 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:15 crc kubenswrapper[4650]: E0201 07:24:15.965339 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:15 crc kubenswrapper[4650]: E0201 07:24:15.965511 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.985434 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.985478 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.985496 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.985520 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:15 crc kubenswrapper[4650]: I0201 07:24:15.985538 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:15Z","lastTransitionTime":"2026-02-01T07:24:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.088367 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.088424 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.088441 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.088467 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.088485 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:16Z","lastTransitionTime":"2026-02-01T07:24:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.191496 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.191553 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.191572 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.191597 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.191615 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:16Z","lastTransitionTime":"2026-02-01T07:24:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.294365 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.294421 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.294438 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.294464 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.294480 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:16Z","lastTransitionTime":"2026-02-01T07:24:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.398364 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.398416 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.398433 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.398457 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.398476 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:16Z","lastTransitionTime":"2026-02-01T07:24:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.500529 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.500620 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.500642 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.500666 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.500686 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:16Z","lastTransitionTime":"2026-02-01T07:24:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.603512 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.603611 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.603628 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.603650 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.603670 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:16Z","lastTransitionTime":"2026-02-01T07:24:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.707187 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.707251 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.707268 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.707296 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.707314 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:16Z","lastTransitionTime":"2026-02-01T07:24:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.810795 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.810852 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.810871 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.810897 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.810915 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:16Z","lastTransitionTime":"2026-02-01T07:24:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.913919 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.913981 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.913999 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.914022 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.914069 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:16Z","lastTransitionTime":"2026-02-01T07:24:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.920140 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 02:50:51.015129715 +0000 UTC Feb 01 07:24:16 crc kubenswrapper[4650]: I0201 07:24:16.964995 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:16 crc kubenswrapper[4650]: E0201 07:24:16.965206 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.017180 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.017255 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.017279 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.017307 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.017332 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:17Z","lastTransitionTime":"2026-02-01T07:24:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.120343 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.120433 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.120457 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.120488 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.120509 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:17Z","lastTransitionTime":"2026-02-01T07:24:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.223462 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.223798 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.223953 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.224147 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.224284 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:17Z","lastTransitionTime":"2026-02-01T07:24:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.328321 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.328626 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.328643 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.328665 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.328681 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:17Z","lastTransitionTime":"2026-02-01T07:24:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.431523 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.431585 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.431602 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.431628 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.431644 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:17Z","lastTransitionTime":"2026-02-01T07:24:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.534576 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.534632 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.534649 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.534671 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.534689 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:17Z","lastTransitionTime":"2026-02-01T07:24:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.637464 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.637541 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.637566 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.637596 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.637621 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:17Z","lastTransitionTime":"2026-02-01T07:24:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.741128 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.741195 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.741213 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.741236 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.741254 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:17Z","lastTransitionTime":"2026-02-01T07:24:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.843884 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.843948 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.843969 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.843993 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.844010 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:17Z","lastTransitionTime":"2026-02-01T07:24:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.921122 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 21:28:29.786325415 +0000 UTC Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.947662 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.947726 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.947744 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.947770 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.947787 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:17Z","lastTransitionTime":"2026-02-01T07:24:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.965334 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.965375 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:17 crc kubenswrapper[4650]: E0201 07:24:17.965549 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:17 crc kubenswrapper[4650]: I0201 07:24:17.965611 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:17 crc kubenswrapper[4650]: E0201 07:24:17.965789 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:17 crc kubenswrapper[4650]: E0201 07:24:17.965928 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.050806 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.050852 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.050882 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.050905 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.050922 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:18Z","lastTransitionTime":"2026-02-01T07:24:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.154741 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.154799 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.154818 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.154841 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.154858 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:18Z","lastTransitionTime":"2026-02-01T07:24:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.257502 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.257597 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.257622 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.257651 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.257669 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:18Z","lastTransitionTime":"2026-02-01T07:24:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.360374 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.360762 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.360904 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.361060 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.361196 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:18Z","lastTransitionTime":"2026-02-01T07:24:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.464671 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.464753 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.464775 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.464832 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.464854 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:18Z","lastTransitionTime":"2026-02-01T07:24:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.511420 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.526692 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.536755 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.560012 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\
\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.568326 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.568418 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.568447 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.568481 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.568504 4650 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:18Z","lastTransitionTime":"2026-02-01T07:24:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.579642 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.597780 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.618607 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.634784 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.647387 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.663366 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.671177 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.671254 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.671275 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.671300 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.671318 4650 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:18Z","lastTransitionTime":"2026-02-01T07:24:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.677806 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.693635 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.726157 4650 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:07Z\\\",\\\"message\\\":\\\"vn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0201 07:24:07.954894 6189 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:24:07.955019 6189 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:24:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.745727 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.767461 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.774599 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.774688 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.774706 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.774731 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.774775 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:18Z","lastTransitionTime":"2026-02-01T07:24:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.783952 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.800830 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 
07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.819872 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:18Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.878148 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.878217 4650 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.878234 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.878258 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.878279 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:18Z","lastTransitionTime":"2026-02-01T07:24:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.922004 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 17:27:43.684297933 +0000 UTC Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.966011 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:18 crc kubenswrapper[4650]: E0201 07:24:18.966311 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.981198 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.981244 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.981253 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.981267 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:18 crc kubenswrapper[4650]: I0201 07:24:18.981277 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:18Z","lastTransitionTime":"2026-02-01T07:24:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.048451 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.048513 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.048530 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.048553 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.048570 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:19Z","lastTransitionTime":"2026-02-01T07:24:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: E0201 07:24:19.072407 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:19Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.078296 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.078371 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.078388 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.078412 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.078434 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:19Z","lastTransitionTime":"2026-02-01T07:24:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: E0201 07:24:19.103019 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:19Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.107896 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.108114 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.108281 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.108420 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.108584 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:19Z","lastTransitionTime":"2026-02-01T07:24:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: E0201 07:24:19.128512 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:19Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.134803 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.135093 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.135246 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.135408 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.135562 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:19Z","lastTransitionTime":"2026-02-01T07:24:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: E0201 07:24:19.159407 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:19Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.164063 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.164122 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.164140 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.164164 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.164181 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:19Z","lastTransitionTime":"2026-02-01T07:24:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: E0201 07:24:19.187154 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:19Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:19 crc kubenswrapper[4650]: E0201 07:24:19.187406 4650 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.190000 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.190073 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.190094 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.190124 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.190149 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:19Z","lastTransitionTime":"2026-02-01T07:24:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.293576 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.293636 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.293653 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.293675 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.293692 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:19Z","lastTransitionTime":"2026-02-01T07:24:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.396866 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.396928 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.396945 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.396971 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.396991 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:19Z","lastTransitionTime":"2026-02-01T07:24:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.499722 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.499776 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.499795 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.499817 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.499834 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:19Z","lastTransitionTime":"2026-02-01T07:24:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.602428 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.602494 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.602516 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.602546 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.602566 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:19Z","lastTransitionTime":"2026-02-01T07:24:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.705661 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.705711 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.705728 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.705750 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.705769 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:19Z","lastTransitionTime":"2026-02-01T07:24:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.809153 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.809212 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.809229 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.809252 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.809269 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:19Z","lastTransitionTime":"2026-02-01T07:24:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.913378 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.913428 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.913444 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.913467 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.913487 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:19Z","lastTransitionTime":"2026-02-01T07:24:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.922399 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 06:27:50.316327013 +0000 UTC Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.964693 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:19 crc kubenswrapper[4650]: E0201 07:24:19.964876 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.965284 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:19 crc kubenswrapper[4650]: I0201 07:24:19.965423 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:19 crc kubenswrapper[4650]: E0201 07:24:19.965556 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:19 crc kubenswrapper[4650]: E0201 07:24:19.965715 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.016067 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.016132 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.016151 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.016174 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.016191 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:20Z","lastTransitionTime":"2026-02-01T07:24:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.119848 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.119912 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.119931 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.119961 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.119979 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:20Z","lastTransitionTime":"2026-02-01T07:24:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.223235 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.223313 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.223336 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.223367 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.223389 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:20Z","lastTransitionTime":"2026-02-01T07:24:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.326467 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.326528 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.326545 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.326567 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.326582 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:20Z","lastTransitionTime":"2026-02-01T07:24:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.429771 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.429825 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.429842 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.430416 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.430466 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:20Z","lastTransitionTime":"2026-02-01T07:24:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.533896 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.533970 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.533993 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.534019 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.534072 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:20Z","lastTransitionTime":"2026-02-01T07:24:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.636818 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.636887 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.636907 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.636932 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.636950 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:20Z","lastTransitionTime":"2026-02-01T07:24:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.739274 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.739345 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.739364 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.739387 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.739409 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:20Z","lastTransitionTime":"2026-02-01T07:24:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.842293 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.842347 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.842360 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.842379 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.842392 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:20Z","lastTransitionTime":"2026-02-01T07:24:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.923188 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 15:11:34.417655181 +0000 UTC Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.945653 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.945705 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.945724 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.945751 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.945770 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:20Z","lastTransitionTime":"2026-02-01T07:24:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.964782 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:20 crc kubenswrapper[4650]: E0201 07:24:20.965631 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:20 crc kubenswrapper[4650]: I0201 07:24:20.965780 4650 scope.go:117] "RemoveContainer" containerID="ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7" Feb 01 07:24:20 crc kubenswrapper[4650]: E0201 07:24:20.966774 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.049383 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.049434 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.049451 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.049475 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.049495 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:21Z","lastTransitionTime":"2026-02-01T07:24:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.153288 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.153344 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.153354 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.153369 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.153378 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:21Z","lastTransitionTime":"2026-02-01T07:24:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.262188 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.262279 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.262335 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.262401 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.262420 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:21Z","lastTransitionTime":"2026-02-01T07:24:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.365713 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.365770 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.365787 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.365809 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.365828 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:21Z","lastTransitionTime":"2026-02-01T07:24:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.468623 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.468680 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.468698 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.468723 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.468744 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:21Z","lastTransitionTime":"2026-02-01T07:24:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.572013 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.572383 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.572545 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.572689 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.572815 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:21Z","lastTransitionTime":"2026-02-01T07:24:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.676211 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.676300 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.676319 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.676345 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.676363 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:21Z","lastTransitionTime":"2026-02-01T07:24:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.779678 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.779740 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.779757 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.779782 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.779798 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:21Z","lastTransitionTime":"2026-02-01T07:24:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.883352 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.883421 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.883441 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.883468 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.883490 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:21Z","lastTransitionTime":"2026-02-01T07:24:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.924138 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 05:59:52.656200553 +0000 UTC Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.964863 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.964935 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.964884 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:21 crc kubenswrapper[4650]: E0201 07:24:21.965172 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:21 crc kubenswrapper[4650]: E0201 07:24:21.965434 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:21 crc kubenswrapper[4650]: E0201 07:24:21.965683 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.983288 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a199b63-9a1e-4f49-92ef-bdadf4f333b9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41b74922a03b7e0181459f5ceae7d0fa3f44d22343e7060375b909547ca718b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29a24bc997c46ab92b11fd78aa3b2f091e944923b74dd7970a3f68a9070d8553\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2061d447222e30385d520b018ccbb80b372ecbedf5b18b5e60863abfe2ed3ed4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-di
r\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:21Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.986584 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.986643 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.986665 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.986962 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:21 crc kubenswrapper[4650]: I0201 07:24:21.987007 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:21Z","lastTransitionTime":"2026-02-01T07:24:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.005480 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:21Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.024596 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.041112 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.058933 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 
07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.074534 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.090210 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.090483 4650 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.090960 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.091426 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.091975 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:22Z","lastTransitionTime":"2026-02-01T07:24:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.093209 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.112393 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.130948 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.149396 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.167180 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.183994 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.195362 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.195413 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.195425 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.195442 
4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.195458 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:22Z","lastTransitionTime":"2026-02-01T07:24:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.203813 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.219655 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.232843 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.248971 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.272054 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:07Z\\\",\\\"message\\\":\\\"vn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0201 07:24:07.954894 6189 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:24:07.955019 6189 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:24:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:22Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.297866 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.297903 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.297911 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.297927 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.297937 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:22Z","lastTransitionTime":"2026-02-01T07:24:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.400884 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.401332 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.401754 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.402006 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.402251 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:22Z","lastTransitionTime":"2026-02-01T07:24:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.504984 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.505094 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.505113 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.505137 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.505154 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:22Z","lastTransitionTime":"2026-02-01T07:24:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.608814 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.609217 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.609383 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.609539 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.609677 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:22Z","lastTransitionTime":"2026-02-01T07:24:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.713865 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.713921 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.713938 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.713962 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.713980 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:22Z","lastTransitionTime":"2026-02-01T07:24:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.817498 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.817555 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.817575 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.817600 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.817618 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:22Z","lastTransitionTime":"2026-02-01T07:24:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.920640 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.920690 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.920711 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.920735 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.920752 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:22Z","lastTransitionTime":"2026-02-01T07:24:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.924799 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 13:50:38.493798521 +0000 UTC Feb 01 07:24:22 crc kubenswrapper[4650]: I0201 07:24:22.964225 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:22 crc kubenswrapper[4650]: E0201 07:24:22.964385 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.024511 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.024573 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.024595 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.024620 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.024637 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:23Z","lastTransitionTime":"2026-02-01T07:24:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.128156 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.128250 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.128268 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.128294 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.128312 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:23Z","lastTransitionTime":"2026-02-01T07:24:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.231447 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.231505 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.231521 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.231547 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.231564 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:23Z","lastTransitionTime":"2026-02-01T07:24:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.334917 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.334984 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.335001 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.335053 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.335071 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:23Z","lastTransitionTime":"2026-02-01T07:24:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.440662 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.440718 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.440735 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.440759 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.440776 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:23Z","lastTransitionTime":"2026-02-01T07:24:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.543566 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.543634 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.543653 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.543679 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.543700 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:23Z","lastTransitionTime":"2026-02-01T07:24:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.653645 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.653715 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.653739 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.653763 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.653781 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:23Z","lastTransitionTime":"2026-02-01T07:24:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.758164 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.758222 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.758243 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.758275 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.758298 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:23Z","lastTransitionTime":"2026-02-01T07:24:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.861057 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.861103 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.861119 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.861142 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.861159 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:23Z","lastTransitionTime":"2026-02-01T07:24:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.925109 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 03:33:03.048607624 +0000 UTC Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.964585 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.964605 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.965062 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.965097 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.965114 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.965138 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.965155 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:23Z","lastTransitionTime":"2026-02-01T07:24:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:23 crc kubenswrapper[4650]: E0201 07:24:23.965210 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:23 crc kubenswrapper[4650]: I0201 07:24:23.964659 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:23 crc kubenswrapper[4650]: E0201 07:24:23.965328 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:23 crc kubenswrapper[4650]: E0201 07:24:23.965784 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.067737 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.067796 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.067818 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.067841 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.067860 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:24Z","lastTransitionTime":"2026-02-01T07:24:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.170751 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.170798 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.170817 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.170842 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.170860 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:24Z","lastTransitionTime":"2026-02-01T07:24:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.273423 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.273467 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.273483 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.273509 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.273527 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:24Z","lastTransitionTime":"2026-02-01T07:24:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.376781 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.376822 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.376838 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.376857 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.376873 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:24Z","lastTransitionTime":"2026-02-01T07:24:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.480017 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.480095 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.480111 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.480131 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.480147 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:24Z","lastTransitionTime":"2026-02-01T07:24:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.583374 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.583551 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.583577 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.583598 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.583656 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:24Z","lastTransitionTime":"2026-02-01T07:24:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.686867 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.686913 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.686930 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.686953 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.686971 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:24Z","lastTransitionTime":"2026-02-01T07:24:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.789776 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.789817 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.789831 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.789851 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.789870 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:24Z","lastTransitionTime":"2026-02-01T07:24:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.893239 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.893295 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.893311 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.893332 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.893348 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:24Z","lastTransitionTime":"2026-02-01T07:24:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.925846 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 09:23:17.331309689 +0000 UTC Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.964826 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:24 crc kubenswrapper[4650]: E0201 07:24:24.965013 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.996243 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.996285 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.996301 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.996324 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:24 crc kubenswrapper[4650]: I0201 07:24:24.996341 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:24Z","lastTransitionTime":"2026-02-01T07:24:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.099798 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.099843 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.099861 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.099884 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.099899 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:25Z","lastTransitionTime":"2026-02-01T07:24:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.202602 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.202658 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.202678 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.202703 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.202722 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:25Z","lastTransitionTime":"2026-02-01T07:24:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.306208 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.306310 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.306334 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.306364 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.306386 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:25Z","lastTransitionTime":"2026-02-01T07:24:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.409364 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.409411 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.409422 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.409438 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.409451 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:25Z","lastTransitionTime":"2026-02-01T07:24:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.512292 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.512335 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.512346 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.512361 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.512372 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:25Z","lastTransitionTime":"2026-02-01T07:24:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.615277 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.615324 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.615338 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.615355 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.615367 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:25Z","lastTransitionTime":"2026-02-01T07:24:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.719228 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.719317 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.719336 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.719360 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.719376 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:25Z","lastTransitionTime":"2026-02-01T07:24:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.822315 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.822368 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.822385 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.822418 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.822434 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:25Z","lastTransitionTime":"2026-02-01T07:24:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.924332 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.924378 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.924395 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.924424 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.924442 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:25Z","lastTransitionTime":"2026-02-01T07:24:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:25 crc kubenswrapper[4650]: I0201 07:24:25.926665 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 12:59:41.878528599 +0000 UTC Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.007222 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.007271 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:26 crc kubenswrapper[4650]: E0201 07:24:26.007368 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:26 crc kubenswrapper[4650]: E0201 07:24:26.007570 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.007612 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:26 crc kubenswrapper[4650]: E0201 07:24:26.007733 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.026310 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.026369 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.026387 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.026412 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.026432 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:26Z","lastTransitionTime":"2026-02-01T07:24:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.145398 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.145460 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.145470 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.145483 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.145491 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:26Z","lastTransitionTime":"2026-02-01T07:24:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.247212 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.247243 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.247250 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.247264 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.247273 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:26Z","lastTransitionTime":"2026-02-01T07:24:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.445391 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.445456 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.445482 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.445512 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.445534 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:26Z","lastTransitionTime":"2026-02-01T07:24:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.547561 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.547595 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.547604 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.547618 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.547628 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:26Z","lastTransitionTime":"2026-02-01T07:24:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.650480 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.650517 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.650525 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.650540 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.650552 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:26Z","lastTransitionTime":"2026-02-01T07:24:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.752917 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.752946 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.752954 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.752967 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.752975 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:26Z","lastTransitionTime":"2026-02-01T07:24:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.855830 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.855865 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.855875 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.855889 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.855901 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:26Z","lastTransitionTime":"2026-02-01T07:24:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.928070 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 13:40:38.854736358 +0000 UTC Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.958482 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.958549 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.958568 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.958596 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.958616 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:26Z","lastTransitionTime":"2026-02-01T07:24:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:26 crc kubenswrapper[4650]: I0201 07:24:26.965234 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:26 crc kubenswrapper[4650]: E0201 07:24:26.965447 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.061258 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.061301 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.061314 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.061331 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.061342 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:27Z","lastTransitionTime":"2026-02-01T07:24:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.164580 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.164618 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.164631 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.164648 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.164660 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:27Z","lastTransitionTime":"2026-02-01T07:24:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.267665 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.267695 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.267704 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.267716 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.267726 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:27Z","lastTransitionTime":"2026-02-01T07:24:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.369287 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.369323 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.369332 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.369353 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.369364 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:27Z","lastTransitionTime":"2026-02-01T07:24:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.471859 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.471917 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.471929 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.471945 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.471958 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:27Z","lastTransitionTime":"2026-02-01T07:24:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.575089 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.575136 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.575149 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.575164 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.575173 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:27Z","lastTransitionTime":"2026-02-01T07:24:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.678133 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.678177 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.678186 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.678204 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.678214 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:27Z","lastTransitionTime":"2026-02-01T07:24:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.782141 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.782228 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.782250 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.782277 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.782298 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:27Z","lastTransitionTime":"2026-02-01T07:24:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.884883 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.884948 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.884967 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.884996 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.885014 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:27Z","lastTransitionTime":"2026-02-01T07:24:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.928635 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 23:15:12.624179921 +0000 UTC Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.965274 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.965370 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:27 crc kubenswrapper[4650]: E0201 07:24:27.965476 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:27 crc kubenswrapper[4650]: E0201 07:24:27.965559 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.965688 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:27 crc kubenswrapper[4650]: E0201 07:24:27.965808 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.988098 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.988151 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.988165 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.988184 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:27 crc kubenswrapper[4650]: I0201 07:24:27.988195 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:27Z","lastTransitionTime":"2026-02-01T07:24:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.090929 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.091068 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.091095 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.091124 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.091146 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:28Z","lastTransitionTime":"2026-02-01T07:24:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.193708 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.193741 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.193750 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.193764 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.193775 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:28Z","lastTransitionTime":"2026-02-01T07:24:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.296312 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.296356 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.296367 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.296382 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.296393 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:28Z","lastTransitionTime":"2026-02-01T07:24:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.399279 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.399320 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.399330 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.399350 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.399359 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:28Z","lastTransitionTime":"2026-02-01T07:24:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.502750 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.502795 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.502810 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.502833 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.502846 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:28Z","lastTransitionTime":"2026-02-01T07:24:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.605149 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.605191 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.605202 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.605221 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.605234 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:28Z","lastTransitionTime":"2026-02-01T07:24:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.707780 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.707817 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.707827 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.707845 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.707855 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:28Z","lastTransitionTime":"2026-02-01T07:24:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.812188 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.812240 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.812258 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.812284 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.812301 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:28Z","lastTransitionTime":"2026-02-01T07:24:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.915245 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.915300 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.915317 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.915342 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.915362 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:28Z","lastTransitionTime":"2026-02-01T07:24:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.929105 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 01:17:03.706273746 +0000 UTC Feb 01 07:24:28 crc kubenswrapper[4650]: I0201 07:24:28.965004 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:28 crc kubenswrapper[4650]: E0201 07:24:28.965192 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.018342 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.018393 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.018403 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.018420 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.018431 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.121735 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.122237 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.122254 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.122275 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.122290 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.224932 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.224988 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.225006 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.225054 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.225070 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.296878 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.296932 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.296947 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.296968 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.296982 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: E0201 07:24:29.312858 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:29Z is after 
2025-08-24T17:21:41Z" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.318583 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.318627 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.318640 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.318662 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.318675 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: E0201 07:24:29.334074 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:29Z is after 
2025-08-24T17:21:41Z" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.338890 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.338941 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.338952 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.338970 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.338981 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: E0201 07:24:29.351732 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:29Z is after 
2025-08-24T17:21:41Z" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.355913 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.355949 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.355959 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.355978 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.355990 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: E0201 07:24:29.368006 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:29Z is after 
2025-08-24T17:21:41Z" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.371452 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.371487 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.371501 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.371525 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.371538 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: E0201 07:24:29.384623 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:29Z is after 
2025-08-24T17:21:41Z" Feb 01 07:24:29 crc kubenswrapper[4650]: E0201 07:24:29.385173 4650 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.386960 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.387075 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.387142 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.387222 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.387289 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.490085 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.490618 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.490724 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.490828 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.490963 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.557528 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:29 crc kubenswrapper[4650]: E0201 07:24:29.557712 4650 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:24:29 crc kubenswrapper[4650]: E0201 07:24:29.557789 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs podName:f4593d40-c6e1-42fa-8c18-053ff31304b3 nodeName:}" failed. No retries permitted until 2026-02-01 07:25:01.557764696 +0000 UTC m=+100.280862951 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs") pod "network-metrics-daemon-jvgsf" (UID: "f4593d40-c6e1-42fa-8c18-053ff31304b3") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.594167 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.594215 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.594227 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.594249 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.594261 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.697314 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.697760 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.697912 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.698005 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.698102 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.800631 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.801045 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.801119 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.801216 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.801311 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.904155 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.904229 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.904244 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.904268 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.904286 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:29Z","lastTransitionTime":"2026-02-01T07:24:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.931176 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 12:42:00.107495265 +0000 UTC Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.964906 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.964906 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:29 crc kubenswrapper[4650]: E0201 07:24:29.965083 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:29 crc kubenswrapper[4650]: E0201 07:24:29.965123 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:29 crc kubenswrapper[4650]: I0201 07:24:29.964917 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:29 crc kubenswrapper[4650]: E0201 07:24:29.965193 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.007247 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.007313 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.007333 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.007356 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.007377 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:30Z","lastTransitionTime":"2026-02-01T07:24:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.110159 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.110229 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.110239 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.110257 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.110270 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:30Z","lastTransitionTime":"2026-02-01T07:24:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.213092 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.213134 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.213144 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.213163 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.213178 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:30Z","lastTransitionTime":"2026-02-01T07:24:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.325256 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.325316 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.325335 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.325362 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.325381 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:30Z","lastTransitionTime":"2026-02-01T07:24:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.432942 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.432985 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.432994 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.433014 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.433045 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:30Z","lastTransitionTime":"2026-02-01T07:24:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.535730 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.535772 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.535787 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.535807 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.535821 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:30Z","lastTransitionTime":"2026-02-01T07:24:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.639495 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.639534 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.639544 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.639560 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.639571 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:30Z","lastTransitionTime":"2026-02-01T07:24:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.743243 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.743290 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.743301 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.743318 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.743330 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:30Z","lastTransitionTime":"2026-02-01T07:24:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.848732 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.848778 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.848788 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.848806 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.848817 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:30Z","lastTransitionTime":"2026-02-01T07:24:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.932668 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 03:42:28.849464917 +0000 UTC Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.951340 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.951573 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.951676 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.951784 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.951878 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:30Z","lastTransitionTime":"2026-02-01T07:24:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:30 crc kubenswrapper[4650]: I0201 07:24:30.964623 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:30 crc kubenswrapper[4650]: E0201 07:24:30.964757 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.055720 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.055776 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.055793 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.055813 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.055829 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:31Z","lastTransitionTime":"2026-02-01T07:24:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.158411 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.158458 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.158466 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.158484 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.158498 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:31Z","lastTransitionTime":"2026-02-01T07:24:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.261565 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.261606 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.261618 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.261633 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.261643 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:31Z","lastTransitionTime":"2026-02-01T07:24:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.366721 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.366784 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.366794 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.366811 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.366822 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:31Z","lastTransitionTime":"2026-02-01T07:24:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.470595 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.470646 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.470667 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.470687 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.470700 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:31Z","lastTransitionTime":"2026-02-01T07:24:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.552539 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-k6xtw_e408ebb2-07fc-4317-92d4-1316ece830fb/kube-multus/0.log" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.552599 4650 generic.go:334] "Generic (PLEG): container finished" podID="e408ebb2-07fc-4317-92d4-1316ece830fb" containerID="36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98" exitCode=1 Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.552641 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-k6xtw" event={"ID":"e408ebb2-07fc-4317-92d4-1316ece830fb","Type":"ContainerDied","Data":"36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98"} Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.553186 4650 scope.go:117] "RemoveContainer" containerID="36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.569124 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:31Z\\\",\\\"message\\\":\\\"2026-02-01T07:23:45+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2\\\\n2026-02-01T07:23:45+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2 to /host/opt/cni/bin/\\\\n2026-02-01T07:23:46Z [verbose] multus-daemon started\\\\n2026-02-01T07:23:46Z [verbose] Readiness Indicator file check\\\\n2026-02-01T07:24:31Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.574321 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.574351 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.574361 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.574379 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.574393 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:31Z","lastTransitionTime":"2026-02-01T07:24:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.586677 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002
f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.600065 4650 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.619376 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.646616 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.664715 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.676359 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.676397 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.676407 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.676427 
4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.676440 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:31Z","lastTransitionTime":"2026-02-01T07:24:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.678004 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.688431 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.702448 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.752565 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:07Z\\\",\\\"message\\\":\\\"vn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0201 07:24:07.954894 6189 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:24:07.955019 6189 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:24:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.780184 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.780223 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.780237 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.780252 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.780260 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:31Z","lastTransitionTime":"2026-02-01T07:24:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.791442 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.856079 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a199b63-9a1e-4f49-92ef-bdadf4f333b9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41b74922a03b7e0181459f5ceae7d0fa3f44d22343e7060375b909547ca718b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29a24bc997c46ab92b11fd78aa3b2f091e944923b74dd7970a3f68a9070d8553\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2061d447222e30385d520b018ccbb80b372ecbedf5b18b5e60863abfe2ed3ed4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.869717 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.883270 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.883318 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.883330 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.883351 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.883364 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:31Z","lastTransitionTime":"2026-02-01T07:24:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.886520 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.898665 4650 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.911418 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 
07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.927665 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.934726 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 01:18:14.586277721 +0000 UTC Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.965187 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.965227 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:31 crc kubenswrapper[4650]: E0201 07:24:31.965329 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:31 crc kubenswrapper[4650]: E0201 07:24:31.965410 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.965772 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:31 crc kubenswrapper[4650]: E0201 07:24:31.965832 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.966239 4650 scope.go:117] "RemoveContainer" containerID="ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.998098 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\
",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:31Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.998984 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.999011 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.999020 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.999052 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:31 crc kubenswrapper[4650]: I0201 07:24:31.999064 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:31Z","lastTransitionTime":"2026-02-01T07:24:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.013170 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.024985 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:31Z\\\",\\\"message\\\":\\\"containers with unready 
status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:31Z\\\",\\\"message\\\":\\\"2026-02-01T07:23:45+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2\\\\n2026-02-01T07:23:45+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2 to /host/opt/cni/bin/\\\\n2026-02-01T07:23:46Z [verbose] multus-daemon started\\\\n2026-02-01T07:23:46Z [verbose] Readiness Indicator file check\\\\n2026-02-01T07:24:31Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for 
pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.039973 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube
-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.054865 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.087259 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.101525 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.102152 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.102198 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.102211 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.102230 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.102242 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:32Z","lastTransitionTime":"2026-02-01T07:24:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.123060 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:07Z\\\",\\\"message\\\":\\\"vn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0201 07:24:07.954894 6189 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:24:07.955019 6189 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:24:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.138114 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.152360 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.164502 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.181710 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 
07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.196107 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.204814 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.204849 4650 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.204860 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.204878 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.204890 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:32Z","lastTransitionTime":"2026-02-01T07:24:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.210898 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a199b63-9a1e-4f49-92ef-bdadf4f333b9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41b74922a03b7e0181459f5ceae7d0fa3f44d22343e7060375b909547ca718b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29a24bc997c46ab92b11fd78aa3b2f091e944923b74dd7970a3f68a9070d8553\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-
pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2061d447222e30385d520b018ccbb80b372ecbedf5b18b5e60863abfe2ed3ed4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.228068 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.247077 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.263308 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.308770 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.308819 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.308830 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.308851 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.308869 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:32Z","lastTransitionTime":"2026-02-01T07:24:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.413524 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.414019 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.414153 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.414227 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.414289 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:32Z","lastTransitionTime":"2026-02-01T07:24:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.534215 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.534263 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.534273 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.534290 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.534300 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:32Z","lastTransitionTime":"2026-02-01T07:24:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.561077 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/2.log" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.565905 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28"} Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.566375 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.569199 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-k6xtw_e408ebb2-07fc-4317-92d4-1316ece830fb/kube-multus/0.log" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.569243 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-k6xtw" event={"ID":"e408ebb2-07fc-4317-92d4-1316ece830fb","Type":"ContainerStarted","Data":"c3bba94e2fc70e50b46639439a12b34db68a19e8dd937f5f2cdad28f0a7ac012"} Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.584852 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.599450 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a199b63-9a1e-4f49-92ef-bdadf4f333b9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41b74922a03b7e0181459f5ceae7d0fa3f44d22343e7060375b909547ca718b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29a24bc997c46ab92b11fd78aa3b2f091e944923b74dd7970a3f68a9070d8553\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2061d447222e30385d520b018ccbb80b372ecbedf5b18b5e60863abfe2ed3ed4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.617981 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.636533 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.636563 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.636571 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.636584 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.636592 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:32Z","lastTransitionTime":"2026-02-01T07:24:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.640713 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.662256 4650 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.681044 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 
07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.700310 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.716742 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:31Z\\\",\\\"message\\\":\\\"2026-02-01T07:23:45+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2\\\\n2026-02-01T07:23:45+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2 to /host/opt/cni/bin/\\\\n2026-02-01T07:23:46Z [verbose] multus-daemon started\\\\n2026-02-01T07:23:46Z [verbose] Readiness Indicator file check\\\\n2026-02-01T07:24:31Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.733611 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-api
server-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.740766 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.740800 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.740812 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.740829 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.740842 4650 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:32Z","lastTransitionTime":"2026-02-01T07:24:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.749333 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.769340 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.794274 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.816586 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.833846 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.843956 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.843998 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.844010 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.844050 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.844063 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:32Z","lastTransitionTime":"2026-02-01T07:24:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.847896 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.863017 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.885742 4650 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:07Z\\\",\\\"message\\\":\\\"vn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0201 07:24:07.954894 6189 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:24:07.955019 6189 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:24:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.902227 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a199b63-9a1e-4f49-92ef-bdadf4f333b9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41b74922a03b7e0181459f5ceae7d0fa3f44d22343e7060375b909547ca718b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29a24bc997c46ab92b11fd78aa3b2f091e944923b74dd7970a3f68a9070d8553\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2061d447222e30385d520b018ccbb80b372ecbedf5b18b5e60863abfe2ed3ed4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.935345 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 02:25:37.431853192 +0000 UTC Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.937985 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.950133 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.950213 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.950229 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.950254 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.950270 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:32Z","lastTransitionTime":"2026-02-01T07:24:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.964408 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:32 crc kubenswrapper[4650]: E0201 07:24:32.964580 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.967137 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mount
Path\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.980703 
4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:32 crc kubenswrapper[4650]: I0201 07:24:32.997572 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:32Z is after 2025-08-24T17:21:41Z" Feb 01 
07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.053218 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.053264 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.053274 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.053291 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.053305 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:33Z","lastTransitionTime":"2026-02-01T07:24:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.070050 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.089984 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.107045 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.120587 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.133207 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.146975 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.156479 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.156522 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.156533 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.156551 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.156562 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:33Z","lastTransitionTime":"2026-02-01T07:24:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.160747 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.174370 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3bba94e2fc70e50b46639439a12b34db68a19e8dd937f5f2cdad28f0a7ac012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:31Z\\\",\\\"message\\\":\\\"2026-02-01T07:23:45+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2\\\\n2026-02-01T07:23:45+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2 to /host/opt/cni/bin/\\\\n2026-02-01T07:23:46Z [verbose] multus-daemon started\\\\n2026-02-01T07:23:46Z [verbose] Readiness Indicator file check\\\\n2026-02-01T07:24:31Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.187192 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.200159 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.215908 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\"
:\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.238334 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30a6bae33be01cedb0bc332d252100cc0e075d25
bec23e5ff3e6a41dba3a5b28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:07Z\\\",\\\"message\\\":\\\"vn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0201 07:24:07.954894 6189 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:24:07.955019 6189 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:24:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.260386 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.260443 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.260460 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.260487 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.260500 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:33Z","lastTransitionTime":"2026-02-01T07:24:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.363073 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.363125 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.363137 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.363160 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.363174 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:33Z","lastTransitionTime":"2026-02-01T07:24:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.467156 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.467229 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.467240 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.467265 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.467275 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:33Z","lastTransitionTime":"2026-02-01T07:24:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.571993 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.572453 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.572753 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.572956 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.573095 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:33Z","lastTransitionTime":"2026-02-01T07:24:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.573717 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/3.log" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.574629 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/2.log" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.577218 4650 generic.go:334] "Generic (PLEG): container finished" podID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerID="30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28" exitCode=1 Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.577323 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28"} Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.577410 4650 scope.go:117] "RemoveContainer" containerID="ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.578273 4650 scope.go:117] "RemoveContainer" containerID="30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28" Feb 01 07:24:33 crc kubenswrapper[4650]: E0201 07:24:33.578565 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.596384 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.618628 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.641547 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.664855 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.677786 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.678156 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.678277 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.678403 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.681141 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:33Z","lastTransitionTime":"2026-02-01T07:24:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.684008 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.700696 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.716555 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3bba94e2fc70e50b46639439a12b34db68a19e8dd937f5f2cdad28f0a7ac012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:31Z\\\",\\\"message\\\":\\\"2026-02-01T07:23:45+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2\\\\n2026-02-01T07:23:45+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2 to /host/opt/cni/bin/\\\\n2026-02-01T07:23:46Z [verbose] multus-daemon started\\\\n2026-02-01T07:23:46Z [verbose] Readiness Indicator file check\\\\n2026-02-01T07:24:31Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.732488 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.743111 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.755322 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\"
:\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.779679 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30a6bae33be01cedb0bc332d252100cc0e075d25
bec23e5ff3e6a41dba3a5b28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad763e042a375d4fbba7342085bc0c8ff1a13b5a5c5e26a7e1a782a6aec531c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:07Z\\\",\\\"message\\\":\\\"vn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0201 07:24:07.954894 6189 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0201 07:24:07.955019 6189 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:24:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:33Z\\\",\\\"message\\\":\\\"33.047608 6529 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-hm5cs in node crc\\\\nI0201 07:24:33.047613 6529 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-hm5cs after 0 failed attempt(s)\\\\nI0201 07:24:33.047618 6529 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-hm5cs\\\\nI0201 07:24:33.047626 6529 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI0201 07:24:33.047631 6529 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI0201 07:24:33.047636 6529 ovn.go:134] Ensuring zone local for Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g in node crc\\\\nI0201 07:24:33.047654 6529 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0201 07:24:33.047664 6529 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] 
creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nF0201 07:24:33.047754 6529 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",
\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.784429 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.784485 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.784495 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.784510 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.784519 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:33Z","lastTransitionTime":"2026-02-01T07:24:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.794043 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a199b63-9a1e-4f49-92ef-bdadf4f333b9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41b74922a03b7e0181459f5ceae7d0fa3f44d22343e7060375b909547ca718b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29a24bc997c46ab92b11fd78aa3b2f091e944923b74dd7970a3f68a9070d8553\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2061d447222e30385d520b018ccbb80b372ecbedf5b18b5e60863abfe2ed3ed4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.811108 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.832350 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.845722 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.864859 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.878801 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:33Z is after 2025-08-24T17:21:41Z" 
Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.887097 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.887152 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.887167 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.887192 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.887208 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:33Z","lastTransitionTime":"2026-02-01T07:24:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.935803 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 19:56:57.522669969 +0000 UTC Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.964376 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.964411 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:33 crc kubenswrapper[4650]: E0201 07:24:33.964514 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.964379 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:33 crc kubenswrapper[4650]: E0201 07:24:33.964795 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:33 crc kubenswrapper[4650]: E0201 07:24:33.964875 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.992585 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.992626 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.992636 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.992652 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:33 crc kubenswrapper[4650]: I0201 07:24:33.992662 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:33Z","lastTransitionTime":"2026-02-01T07:24:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.095552 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.095610 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.095620 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.095638 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.095650 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:34Z","lastTransitionTime":"2026-02-01T07:24:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.198702 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.198749 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.198758 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.198774 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.198785 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:34Z","lastTransitionTime":"2026-02-01T07:24:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.302184 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.302283 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.302302 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.302328 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.302346 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:34Z","lastTransitionTime":"2026-02-01T07:24:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.406008 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.406125 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.406145 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.406173 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.406192 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:34Z","lastTransitionTime":"2026-02-01T07:24:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.509745 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.509858 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.509912 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.509943 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.509961 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:34Z","lastTransitionTime":"2026-02-01T07:24:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.583811 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/3.log" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.589908 4650 scope.go:117] "RemoveContainer" containerID="30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28" Feb 01 07:24:34 crc kubenswrapper[4650]: E0201 07:24:34.590172 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.612304 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.612348 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.612361 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.612380 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.612393 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:34Z","lastTransitionTime":"2026-02-01T07:24:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.617209 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:33Z\\\",\\\"message\\\":\\\"33.047608 6529 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-hm5cs in node crc\\\\nI0201 07:24:33.047613 6529 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-hm5cs after 0 failed attempt(s)\\\\nI0201 07:24:33.047618 6529 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-hm5cs\\\\nI0201 07:24:33.047626 6529 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI0201 07:24:33.047631 6529 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI0201 07:24:33.047636 6529 ovn.go:134] Ensuring zone local for Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g in node crc\\\\nI0201 07:24:33.047654 6529 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0201 07:24:33.047664 6529 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nF0201 07:24:33.047754 6529 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:24:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.635752 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.647712 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.662194 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.679217 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:
56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.693536 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.715280 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.715339 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.715349 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.715367 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.715393 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:34Z","lastTransitionTime":"2026-02-01T07:24:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.716724 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a199b63-9a1e-4f49-92ef-bdadf4f333b9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41b74922a03b7e0181459f5ceae7d0fa3f44d22343e7060375b909547ca718b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29a24bc997c46ab92b11fd78aa3b2f091e944923b74dd7970a3f68a9070d8553\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001e
dfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2061d447222e30385d520b018ccbb80b372ecbedf5b18b5e60863abfe2ed3ed4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.730571 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.748340 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.762359 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.805817 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.818530 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.818605 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.818625 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.818658 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.818679 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:34Z","lastTransitionTime":"2026-02-01T07:24:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.828075 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.855084 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3bba94e2fc70e50b46639439a12b34db68a19e8dd937f5f2cdad28f0a7ac012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:31Z\\\",\\\"message\\\":\\\"2026-02-01T07:23:45+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2\\\\n2026-02-01T07:23:45+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2 to /host/opt/cni/bin/\\\\n2026-02-01T07:23:46Z [verbose] multus-daemon started\\\\n2026-02-01T07:23:46Z [verbose] Readiness Indicator file check\\\\n2026-02-01T07:24:31Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.873148 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.895304 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.912740 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.922142 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.922199 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.922219 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.922242 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.922255 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:34Z","lastTransitionTime":"2026-02-01T07:24:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.928788 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:34Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.936929 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 
13:48:34.498194116 +0000 UTC Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.964264 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:34 crc kubenswrapper[4650]: E0201 07:24:34.964967 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:34 crc kubenswrapper[4650]: I0201 07:24:34.977715 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.026111 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.026183 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.026196 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.026216 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.026227 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:35Z","lastTransitionTime":"2026-02-01T07:24:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.129545 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.129605 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.129618 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.129641 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.129653 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:35Z","lastTransitionTime":"2026-02-01T07:24:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.232618 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.232662 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.232674 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.232693 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.232706 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:35Z","lastTransitionTime":"2026-02-01T07:24:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.335146 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.335194 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.335209 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.335230 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.335241 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:35Z","lastTransitionTime":"2026-02-01T07:24:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.439247 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.439296 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.439309 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.439328 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.439342 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:35Z","lastTransitionTime":"2026-02-01T07:24:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.543414 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.543479 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.543502 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.543528 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.543583 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:35Z","lastTransitionTime":"2026-02-01T07:24:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.647292 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.647342 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.647373 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.647393 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.647406 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:35Z","lastTransitionTime":"2026-02-01T07:24:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.750581 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.750661 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.750682 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.750708 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.750788 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:35Z","lastTransitionTime":"2026-02-01T07:24:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.854391 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.854708 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.854769 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.854830 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.854897 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:35Z","lastTransitionTime":"2026-02-01T07:24:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.937819 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 12:09:56.832834233 +0000 UTC Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.958921 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.958982 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.959003 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.959074 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.959095 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:35Z","lastTransitionTime":"2026-02-01T07:24:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.964333 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.964470 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:35 crc kubenswrapper[4650]: E0201 07:24:35.964497 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:35 crc kubenswrapper[4650]: E0201 07:24:35.964682 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:35 crc kubenswrapper[4650]: I0201 07:24:35.964926 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:35 crc kubenswrapper[4650]: E0201 07:24:35.965250 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.062010 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.062130 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.062152 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.062178 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.062197 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:36Z","lastTransitionTime":"2026-02-01T07:24:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.165888 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.165958 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.165980 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.166010 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.166088 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:36Z","lastTransitionTime":"2026-02-01T07:24:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.269062 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.269115 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.269128 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.269147 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.269160 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:36Z","lastTransitionTime":"2026-02-01T07:24:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.373271 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.373339 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.373358 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.373383 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.373403 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:36Z","lastTransitionTime":"2026-02-01T07:24:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.476198 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.476258 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.476275 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.476299 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.476312 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:36Z","lastTransitionTime":"2026-02-01T07:24:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.579238 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.579278 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.579287 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.579300 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.579312 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:36Z","lastTransitionTime":"2026-02-01T07:24:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.685206 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.685251 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.685261 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.685283 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.685294 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:36Z","lastTransitionTime":"2026-02-01T07:24:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.789919 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.790354 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.790366 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.790386 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.790399 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:36Z","lastTransitionTime":"2026-02-01T07:24:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.893237 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.893317 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.893334 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.893363 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.893384 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:36Z","lastTransitionTime":"2026-02-01T07:24:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.938441 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 10:33:11.796550834 +0000 UTC Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.964835 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:36 crc kubenswrapper[4650]: E0201 07:24:36.966061 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.996835 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.997301 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.997405 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.997504 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:36 crc kubenswrapper[4650]: I0201 07:24:36.997598 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:36Z","lastTransitionTime":"2026-02-01T07:24:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.103797 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.103860 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.103877 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.103903 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.103921 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:37Z","lastTransitionTime":"2026-02-01T07:24:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.207311 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.207718 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.207885 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.207966 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.208055 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:37Z","lastTransitionTime":"2026-02-01T07:24:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.311567 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.311625 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.311664 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.311690 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.311702 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:37Z","lastTransitionTime":"2026-02-01T07:24:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.414055 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.414470 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.414560 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.414655 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.414726 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:37Z","lastTransitionTime":"2026-02-01T07:24:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.517844 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.517903 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.517912 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.517929 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.517942 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:37Z","lastTransitionTime":"2026-02-01T07:24:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.621100 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.621151 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.621163 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.621180 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.621192 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:37Z","lastTransitionTime":"2026-02-01T07:24:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.723399 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.723757 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.723834 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.723935 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.724017 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:37Z","lastTransitionTime":"2026-02-01T07:24:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.827703 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.827801 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.827827 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.827860 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.827880 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:37Z","lastTransitionTime":"2026-02-01T07:24:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.931646 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.931713 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.931730 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.931755 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.931772 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:37Z","lastTransitionTime":"2026-02-01T07:24:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.939119 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 00:38:56.040536896 +0000 UTC Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.965127 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.965367 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:37 crc kubenswrapper[4650]: E0201 07:24:37.965571 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:37 crc kubenswrapper[4650]: E0201 07:24:37.965883 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:37 crc kubenswrapper[4650]: I0201 07:24:37.966165 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:37 crc kubenswrapper[4650]: E0201 07:24:37.966446 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.035714 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.036132 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.036250 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.036594 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.036853 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:38Z","lastTransitionTime":"2026-02-01T07:24:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.141315 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.141400 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.141431 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.141467 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.141493 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:38Z","lastTransitionTime":"2026-02-01T07:24:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.244318 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.244364 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.244373 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.244388 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.244402 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:38Z","lastTransitionTime":"2026-02-01T07:24:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.346640 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.346700 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.346713 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.346741 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.346756 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:38Z","lastTransitionTime":"2026-02-01T07:24:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.450998 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.451090 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.451105 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.451127 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.451141 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:38Z","lastTransitionTime":"2026-02-01T07:24:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.555342 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.555426 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.555440 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.555470 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.555486 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:38Z","lastTransitionTime":"2026-02-01T07:24:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.658801 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.658851 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.658865 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.658884 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.658897 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:38Z","lastTransitionTime":"2026-02-01T07:24:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.761760 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.761887 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.761913 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.761996 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.762020 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:38Z","lastTransitionTime":"2026-02-01T07:24:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.865443 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.865528 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.865551 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.865588 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.865606 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:38Z","lastTransitionTime":"2026-02-01T07:24:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.939585 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 19:07:14.023351206 +0000 UTC Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.964305 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:38 crc kubenswrapper[4650]: E0201 07:24:38.964553 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.969467 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.969544 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.969563 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.969593 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:38 crc kubenswrapper[4650]: I0201 07:24:38.969612 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:38Z","lastTransitionTime":"2026-02-01T07:24:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.073470 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.073531 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.073544 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.073565 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.073580 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.179157 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.179217 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.179229 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.179252 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.179266 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.284148 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.284197 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.284210 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.284227 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.284239 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.388704 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.388782 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.388803 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.388831 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.388855 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.394844 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.394917 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.394938 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.394972 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.395017 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: E0201 07:24:39.419301 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:39Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.426285 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.426356 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.426370 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.426395 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.426408 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: E0201 07:24:39.441288 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:39Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.445866 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.445936 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.445949 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.445966 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.445976 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: E0201 07:24:39.459217 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:39Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.464119 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.464169 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.464182 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.464203 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.464216 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: E0201 07:24:39.482820 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:39Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.486757 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.486788 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.486799 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.486815 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.486825 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: E0201 07:24:39.502393 4650 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9f5f39b4-df24-4fd8-bc0a-6661ddd50241\\\",\\\"systemUUID\\\":\\\"df837a87-3594-4d79-9122-32f12f83a642\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:39Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:39 crc kubenswrapper[4650]: E0201 07:24:39.502650 4650 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.504691 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.504760 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.504772 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.504801 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.504814 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.608387 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.608438 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.608455 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.608478 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.608497 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.716860 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.719213 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.719245 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.719272 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.719291 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.822218 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.822284 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.822300 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.822320 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.822334 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.926379 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.926437 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.926453 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.926479 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.926497 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:39Z","lastTransitionTime":"2026-02-01T07:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.940406 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 20:16:42.414023034 +0000 UTC Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.965318 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:39 crc kubenswrapper[4650]: E0201 07:24:39.965580 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.965925 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:39 crc kubenswrapper[4650]: E0201 07:24:39.966072 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:39 crc kubenswrapper[4650]: I0201 07:24:39.966286 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:39 crc kubenswrapper[4650]: E0201 07:24:39.966442 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.030488 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.030577 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.030604 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.030642 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.030669 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:40Z","lastTransitionTime":"2026-02-01T07:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.134517 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.134599 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.134611 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.134638 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.134653 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:40Z","lastTransitionTime":"2026-02-01T07:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.238877 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.238989 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.239010 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.239447 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.239470 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:40Z","lastTransitionTime":"2026-02-01T07:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.342495 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.342547 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.342560 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.342582 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.342594 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:40Z","lastTransitionTime":"2026-02-01T07:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.446599 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.446672 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.446691 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.446718 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.446740 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:40Z","lastTransitionTime":"2026-02-01T07:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.549878 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.549936 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.549954 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.549980 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.550010 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:40Z","lastTransitionTime":"2026-02-01T07:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.653516 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.653555 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.653566 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.653580 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.653588 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:40Z","lastTransitionTime":"2026-02-01T07:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.757286 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.757497 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.757524 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.757549 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.757573 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:40Z","lastTransitionTime":"2026-02-01T07:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.861313 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.861374 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.861393 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.861465 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.861485 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:40Z","lastTransitionTime":"2026-02-01T07:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.940943 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 06:05:24.207803623 +0000 UTC Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.964945 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:40 crc kubenswrapper[4650]: E0201 07:24:40.965298 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.965748 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.965793 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.965812 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.965834 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.965851 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:40Z","lastTransitionTime":"2026-02-01T07:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:40 crc kubenswrapper[4650]: I0201 07:24:40.997374 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.069564 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.069626 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.069639 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.069655 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.069665 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:41Z","lastTransitionTime":"2026-02-01T07:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.174797 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.175277 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.175439 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.175630 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.175783 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:41Z","lastTransitionTime":"2026-02-01T07:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.278822 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.279326 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.279498 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.279682 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.279868 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:41Z","lastTransitionTime":"2026-02-01T07:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.383596 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.383650 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.383662 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.383680 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.383693 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:41Z","lastTransitionTime":"2026-02-01T07:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.487915 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.487962 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.487971 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.487986 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.488000 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:41Z","lastTransitionTime":"2026-02-01T07:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.591563 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.591627 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.591645 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.591672 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.591689 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:41Z","lastTransitionTime":"2026-02-01T07:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.694472 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.694536 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.694558 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.694584 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.694605 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:41Z","lastTransitionTime":"2026-02-01T07:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.797999 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.798107 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.798131 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.798196 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.798220 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:41Z","lastTransitionTime":"2026-02-01T07:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.901859 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.901925 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.901940 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.901963 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.901979 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:41Z","lastTransitionTime":"2026-02-01T07:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.941545 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 05:01:35.356993346 +0000 UTC Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.965289 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.965354 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.965322 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:41 crc kubenswrapper[4650]: E0201 07:24:41.965798 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:41 crc kubenswrapper[4650]: E0201 07:24:41.965988 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:41 crc kubenswrapper[4650]: E0201 07:24:41.966227 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:41 crc kubenswrapper[4650]: I0201 07:24:41.991959 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0ea3e95-72a7-4a87-ab12-6c31f7befe3b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2efe9bd6b22f537dd8b2a8d141bd93cccf9205105730bda2e7454f180efda84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\
\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://681dc37b0b2d54636d6a4e42d515778b4e011945ac39d50537ecb988919ffac7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://566253d2268482baab6b9f52f3989afe3706c00c8bf7c7abb6976ecb4ee26972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f681fdd9550c2d2699117881334298e47e1e82e81d52a71a52f112df5108aba4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\
\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eed3cb895d54b9db37dfd16818d9ea84a23ace784d69180de78f6be5e6665b1f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe6f08ae9c74a5d8d9160fbd7431bd79ab24aef2b0c7c536abfa5a89769c0013\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8cc
f4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1a9e6016241906aff7f15625781dc518bd05f82a9b46e2f298502a8f541b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lt7jl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bvkr8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:41Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.007134 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.007183 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.007241 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.007268 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.007284 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:42Z","lastTransitionTime":"2026-02-01T07:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.010931 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-gz868" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b949fef4-4a92-4734-8edc-4c9f9b2515af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ab38866c55b6d278558e4683033197b42322d120dfbc41954ebab4fe11fb65f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f7l7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:46Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-gz868\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.032279 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"987b2d65-e234-4350-9aa3-abbd99a6ca8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a0d39de8b1241029e7dd2371560c6af02d9af969e1759903e2d2e290fff062f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e8017f3e9b14a886cf1626502c83bbc1b65c3912056d2dfb6cde85f92e2201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-26sdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-mscbj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 
07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.056176 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4593d40-c6e1-42fa-8c18-053ff31304b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:57Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mbbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:57Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jvgsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.082499 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a199b63-9a1e-4f49-92ef-bdadf4f333b9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41b74922a03b7e0181459f5ceae7d0fa3f44d22343e7060375b909547ca718b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29a24bc997c46ab92b11fd78aa3b2f091e944923b74dd7970a3f68a9070d8553\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2061d447222e30385d520b018ccbb80b372ecbedf5b18b5e60863abfe2ed3ed4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d37618dee4c5505345fbb3dafbb8bb3673a16053c688525953a673e57c1be0e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.104997 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.110802 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.110930 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.110958 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.110992 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.111018 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:42Z","lastTransitionTime":"2026-02-01T07:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.134439 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1047763-6905-4170-88d8-be63f9fde78f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca2252d7d2c7b200da374f9b537472556436856e8a048bb3c4d85d71b474c1ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a9b4419798ee164052de5c30996fd93209ae532d70838d4086f7bab2c5766c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53565e4e6e9bd72395f7216877be07928e66899c4c70bc20a14c05b3c37fee26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b85cce18d0dde8ecd1aae782f56766ea995b69e891ec7880f5cf12133f8740ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://910ec686da1b92d850607d6b7e8440a8161587dbc664e2b7eaa019d3610e7e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5aa63dcc54aa1e3865f7264a674a0369cd70fa99a76905bbf6d261193d05a4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5aa63dcc54aa1e3865f7264a674a0369cd70fa99a76905bbf6d261193d05a4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca7ad6baa9756be131a6d29e2c3d9067f73306fe82d654e0808d7fa717085e82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca7ad6baa9756be131a6d29e2c3d9067f73306fe82d654e0808d7fa717085e82\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://75bb7bb516c4ba655a7e17cf1bb6159c33c2cdacc4fe7da56bf28b79df4893bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://75bb7bb516c4ba655a7e17cf1bb6159c33c2cdacc4fe7da56bf28b79df4893bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.155803 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c13d434f4cc4412b206a9737fc198a6809051b0aab8481fb07f04eec46aad6d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.174391 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.192612 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://788a1718d31b01c4e16cc640e583635f74af510c5be7f7a6eeaff9fcec6f8332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ea205dc6e938703ca2d8b5e1b6c60e22932f010c603ca35c99517abd83372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\
"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.209011 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7470133bcca739c64dbd071e93119a7fb6bac2c76b86b47815b5db71e3353d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.213598 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.213653 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.213666 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:42 crc 
kubenswrapper[4650]: I0201 07:24:42.213684 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.213696 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:42Z","lastTransitionTime":"2026-02-01T07:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.224814 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k6xtw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e408ebb2-07fc-4317-92d4-1316ece830fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3bba94e2fc70e50b46639439a12b34db68a19e8dd937f5f2cdad28f0a7ac012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:31Z\\\",\\\"message\\\":\\\"2026-02-01T07:23:45+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2\\\\n2026-02-01T07:23:45+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_70f99792-1cee-4ed6-b4f6-a35cb3859cc2 to /host/opt/cni/bin/\\\\n2026-02-01T07:23:46Z [verbose] multus-daemon started\\\\n2026-02-01T07:23:46Z [verbose] Readiness Indicator file check\\\\n2026-02-01T07:24:31Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8x24c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k6xtw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.238990 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f652440c-d98c-4d56-bdf1-4d7c835496d1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0201 07:23:35.454011 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0201 07:23:35.457178 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1003398465/tls.crt::/tmp/serving-cert-1003398465/tls.key\\\\\\\"\\\\nI0201 07:23:41.553972 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0201 07:23:41.569431 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0201 07:23:41.569478 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0201 07:23:41.569545 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0201 07:23:41.569557 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0201 07:23:41.584313 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0201 07:23:41.584360 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584371 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0201 07:23:41.584379 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0201 07:23:41.584385 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0201 07:23:41.584392 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0201 07:23:41.584399 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0201 07:23:41.584857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0201 07:23:41.587302 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.254432 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"469da179-7975-4fdf-af51-125dc38509e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f68bccd913b9c25028ff8adad5cf0cc4ea3fd358a26920134b05de671192333d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c233e54b775fb06349bd9f564e0b167d343524ab27ee6f8b677dbcd0d2ec2a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce8abf2b19c640c03befed9ddd802aabf58bd8c7f3eb0716b255d0d42e4e6c61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.264521 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nlgpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"478bcf82-4ee9-40c3-af5e-eb4731802b79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1dd8b8fc32cd2196deb67150a945afb5bec2b5a533e621181c8053f66edca2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72x9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nlgpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.277799 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8dd1b5da-94bb-4bf2-8fed-958df80a8806\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d224b0ae69e3c1643d9408409b6b3cdc9acdfbbd681cba4a9c9da0ce8089487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzwg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xfq9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.298656 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30a6bae33be01cedb0bc332d252100cc0e075d25
bec23e5ff3e6a41dba3a5b28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-01T07:24:33Z\\\",\\\"message\\\":\\\"33.047608 6529 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-hm5cs in node crc\\\\nI0201 07:24:33.047613 6529 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-hm5cs after 0 failed attempt(s)\\\\nI0201 07:24:33.047618 6529 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-hm5cs\\\\nI0201 07:24:33.047626 6529 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI0201 07:24:33.047631 6529 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI0201 07:24:33.047636 6529 ovn.go:134] Ensuring zone local for Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g in node crc\\\\nI0201 07:24:33.047654 6529 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0201 07:24:33.047664 6529 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nF0201 07:24:33.047754 6529 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-01T07:24:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nvhzk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hm5cs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.310571 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9f4434f-9f51-4a25-aa98-261a99c5ac4b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de423d103d88032359f0901b1ae4f11fe931dfaafbc8842536e22f9b73a2343b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-01T07:23:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae08477a377b8a9970bdae5945412b83b02d682662656b
ba65f1a285b5a4e01a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae08477a377b8a9970bdae5945412b83b02d682662656bba65f1a285b5a4e01a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-01T07:23:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-01T07:23:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-01T07:23:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.316185 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.316243 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.316256 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.316276 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.316289 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:42Z","lastTransitionTime":"2026-02-01T07:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.324506 4650 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-01T07:23:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-01T07:24:42Z is after 2025-08-24T17:21:41Z" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.419678 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.420212 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.420416 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.420592 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.420841 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:42Z","lastTransitionTime":"2026-02-01T07:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.523908 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.523952 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.523963 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.523983 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.523995 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:42Z","lastTransitionTime":"2026-02-01T07:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.627269 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.627713 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.627857 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.628006 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.628176 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:42Z","lastTransitionTime":"2026-02-01T07:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.731888 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.732452 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.732600 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.732739 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.732871 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:42Z","lastTransitionTime":"2026-02-01T07:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.836060 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.836112 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.836129 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.836190 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.836253 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:42Z","lastTransitionTime":"2026-02-01T07:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.938526 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.939083 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.939336 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.939634 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.939816 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:42Z","lastTransitionTime":"2026-02-01T07:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.942659 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 02:42:15.747077211 +0000 UTC Feb 01 07:24:42 crc kubenswrapper[4650]: I0201 07:24:42.965155 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:42 crc kubenswrapper[4650]: E0201 07:24:42.965738 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.043298 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.043328 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.043337 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.043350 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.043358 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:43Z","lastTransitionTime":"2026-02-01T07:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.146899 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.147250 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.147325 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.147435 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.147557 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:43Z","lastTransitionTime":"2026-02-01T07:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.250118 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.250154 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.250164 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.250180 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.250194 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:43Z","lastTransitionTime":"2026-02-01T07:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.352652 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.352970 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.353063 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.353150 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.353229 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:43Z","lastTransitionTime":"2026-02-01T07:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.456564 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.456616 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.456626 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.456645 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.456656 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:43Z","lastTransitionTime":"2026-02-01T07:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.560228 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.560595 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.560779 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.560992 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.561190 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:43Z","lastTransitionTime":"2026-02-01T07:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.665208 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.665251 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.665260 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.665276 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.665286 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:43Z","lastTransitionTime":"2026-02-01T07:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.769543 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.769731 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.769755 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.769823 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.769847 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:43Z","lastTransitionTime":"2026-02-01T07:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.873964 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.874150 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.874176 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.874205 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.874225 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:43Z","lastTransitionTime":"2026-02-01T07:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.943459 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 11:44:30.537533192 +0000 UTC Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.964991 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.965131 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.965217 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:43 crc kubenswrapper[4650]: E0201 07:24:43.965709 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:43 crc kubenswrapper[4650]: E0201 07:24:43.965699 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:43 crc kubenswrapper[4650]: E0201 07:24:43.966131 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.977281 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.977362 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.977382 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.977409 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:43 crc kubenswrapper[4650]: I0201 07:24:43.977430 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:43Z","lastTransitionTime":"2026-02-01T07:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.081819 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.081885 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.081908 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.081942 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.081966 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:44Z","lastTransitionTime":"2026-02-01T07:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.184767 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.185266 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.185333 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.185395 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.185459 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:44Z","lastTransitionTime":"2026-02-01T07:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.287982 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.288058 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.288078 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.288102 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.288120 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:44Z","lastTransitionTime":"2026-02-01T07:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.392142 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.392219 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.392257 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.392289 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.392311 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:44Z","lastTransitionTime":"2026-02-01T07:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.495096 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.495146 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.495165 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.495189 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.495209 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:44Z","lastTransitionTime":"2026-02-01T07:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.605845 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.605929 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.606821 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.607072 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.608006 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:44Z","lastTransitionTime":"2026-02-01T07:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.711886 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.711941 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.711952 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.711972 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.711984 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:44Z","lastTransitionTime":"2026-02-01T07:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.814911 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.815010 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.815088 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.815140 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.815159 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:44Z","lastTransitionTime":"2026-02-01T07:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.919657 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.919784 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.919816 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.919850 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.919880 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:44Z","lastTransitionTime":"2026-02-01T07:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.943633 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 10:59:40.449706536 +0000 UTC Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.965261 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:44 crc kubenswrapper[4650]: E0201 07:24:44.966120 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:44 crc kubenswrapper[4650]: I0201 07:24:44.966443 4650 scope.go:117] "RemoveContainer" containerID="30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28" Feb 01 07:24:44 crc kubenswrapper[4650]: E0201 07:24:44.966765 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.023835 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.023902 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.023913 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.023932 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.023945 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:45Z","lastTransitionTime":"2026-02-01T07:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.127401 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.127467 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.127489 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.127518 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.127540 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:45Z","lastTransitionTime":"2026-02-01T07:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.230632 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.230713 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.230751 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.230785 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.230806 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:45Z","lastTransitionTime":"2026-02-01T07:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.334559 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.334635 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.334659 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.334690 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.334711 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:45Z","lastTransitionTime":"2026-02-01T07:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.437639 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.437709 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.437731 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.437767 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.437791 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:45Z","lastTransitionTime":"2026-02-01T07:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.540659 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.540699 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.540708 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.540727 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.540738 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:45Z","lastTransitionTime":"2026-02-01T07:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.643175 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.643221 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.643230 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.643245 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.643254 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:45Z","lastTransitionTime":"2026-02-01T07:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.707371 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.707516 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.707642 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.707700 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.707726 4650 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.707648 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.707798 4650 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.707867 4650 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.707827 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.70779685 +0000 UTC m=+148.430895135 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.707989 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.707927313 +0000 UTC m=+148.431025568 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.746295 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.746552 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.746635 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.746719 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.746802 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:45Z","lastTransitionTime":"2026-02-01T07:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.850232 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.850336 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.850359 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.850383 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.850405 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:45Z","lastTransitionTime":"2026-02-01T07:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.910779 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.910968 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.911023 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.911242 4650 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.911310 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.91126916 +0000 UTC m=+148.634367435 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.911357 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.911344512 +0000 UTC m=+148.634442787 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.911512 4650 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.911570 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.911558207 +0000 UTC m=+148.634656492 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.944107 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 18:00:42.652535752 +0000 UTC Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.953571 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.953642 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.953656 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.953676 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.953689 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:45Z","lastTransitionTime":"2026-02-01T07:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.965276 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.965350 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:45 crc kubenswrapper[4650]: I0201 07:24:45.965508 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.965687 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.965763 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:45 crc kubenswrapper[4650]: E0201 07:24:45.965958 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.056277 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.056317 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.056325 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.056341 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.056350 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:46Z","lastTransitionTime":"2026-02-01T07:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.158729 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.158795 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.158814 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.158841 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.158859 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:46Z","lastTransitionTime":"2026-02-01T07:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.261788 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.261853 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.261870 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.261894 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.261911 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:46Z","lastTransitionTime":"2026-02-01T07:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.364590 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.364632 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.364642 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.364676 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.364686 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:46Z","lastTransitionTime":"2026-02-01T07:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.466762 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.466857 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.466874 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.466929 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.466948 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:46Z","lastTransitionTime":"2026-02-01T07:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.570054 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.570105 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.570156 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.570178 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.570321 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:46Z","lastTransitionTime":"2026-02-01T07:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.674197 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.674315 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.674338 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.674362 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.674384 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:46Z","lastTransitionTime":"2026-02-01T07:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.777769 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.777846 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.777860 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.777881 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.777904 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:46Z","lastTransitionTime":"2026-02-01T07:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.881292 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.881357 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.881379 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.881409 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.881439 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:46Z","lastTransitionTime":"2026-02-01T07:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.944967 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 05:26:34.31847186 +0000 UTC Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.965238 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:46 crc kubenswrapper[4650]: E0201 07:24:46.965519 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.984878 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.984934 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.984946 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.984967 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:46 crc kubenswrapper[4650]: I0201 07:24:46.984979 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:46Z","lastTransitionTime":"2026-02-01T07:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.087572 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.087620 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.087635 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.087656 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.087671 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:47Z","lastTransitionTime":"2026-02-01T07:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.190131 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.190203 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.190219 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.190247 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.190267 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:47Z","lastTransitionTime":"2026-02-01T07:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.294109 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.294181 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.294201 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.294228 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.294250 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:47Z","lastTransitionTime":"2026-02-01T07:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.398949 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.399090 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.399111 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.399147 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.399166 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:47Z","lastTransitionTime":"2026-02-01T07:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.502092 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.502166 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.502185 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.502212 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.502231 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:47Z","lastTransitionTime":"2026-02-01T07:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.606428 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.606495 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.606512 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.606539 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.606559 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:47Z","lastTransitionTime":"2026-02-01T07:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.710693 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.710739 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.710750 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.710768 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.710780 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:47Z","lastTransitionTime":"2026-02-01T07:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.814088 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.814135 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.814147 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.814168 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.814186 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:47Z","lastTransitionTime":"2026-02-01T07:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.917377 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.917427 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.917438 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.917464 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.917477 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:47Z","lastTransitionTime":"2026-02-01T07:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.945523 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 07:12:16.402573336 +0000 UTC Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.965212 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.965763 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:47 crc kubenswrapper[4650]: I0201 07:24:47.965930 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:47 crc kubenswrapper[4650]: E0201 07:24:47.966073 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:47 crc kubenswrapper[4650]: E0201 07:24:47.966215 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:47 crc kubenswrapper[4650]: E0201 07:24:47.966388 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.020436 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.020474 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.020482 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.020500 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.020511 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:48Z","lastTransitionTime":"2026-02-01T07:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.124370 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.124436 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.124449 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.124472 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.124490 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:48Z","lastTransitionTime":"2026-02-01T07:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.228345 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.228414 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.228433 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.228463 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.228479 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:48Z","lastTransitionTime":"2026-02-01T07:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.333494 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.333551 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.333563 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.333588 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.333603 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:48Z","lastTransitionTime":"2026-02-01T07:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.436355 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.436641 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.436651 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.436674 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.436684 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:48Z","lastTransitionTime":"2026-02-01T07:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.541240 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.541328 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.541348 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.541437 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.541456 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:48Z","lastTransitionTime":"2026-02-01T07:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.644338 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.644711 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.644795 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.644897 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.645063 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:48Z","lastTransitionTime":"2026-02-01T07:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.747565 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.747634 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.747654 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.747681 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.747695 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:48Z","lastTransitionTime":"2026-02-01T07:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.850651 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.850720 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.850738 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.850767 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.850789 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:48Z","lastTransitionTime":"2026-02-01T07:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.945957 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 09:33:37.873512334 +0000 UTC Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.953601 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.953669 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.953700 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.953739 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.953763 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:48Z","lastTransitionTime":"2026-02-01T07:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:48 crc kubenswrapper[4650]: I0201 07:24:48.964606 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:48 crc kubenswrapper[4650]: E0201 07:24:48.965095 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.058022 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.058134 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.058159 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.058190 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.058212 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:49Z","lastTransitionTime":"2026-02-01T07:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.161204 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.161272 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.161296 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.161330 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.161356 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:49Z","lastTransitionTime":"2026-02-01T07:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.264635 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.264703 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.264723 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.264753 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.264773 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:49Z","lastTransitionTime":"2026-02-01T07:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.368449 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.368869 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.368962 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.369066 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.369150 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:49Z","lastTransitionTime":"2026-02-01T07:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.472572 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.472637 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.472653 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.472675 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.472687 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:49Z","lastTransitionTime":"2026-02-01T07:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.575679 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.575731 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.575749 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.575769 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.575784 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:49Z","lastTransitionTime":"2026-02-01T07:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.587561 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.587601 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.587614 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.587628 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.587640 4650 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-01T07:24:49Z","lastTransitionTime":"2026-02-01T07:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.657690 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq"] Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.658857 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.662853 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.663401 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.663693 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.671728 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.720854 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=9.720831087 podStartE2EDuration="9.720831087s" podCreationTimestamp="2026-02-01 07:24:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:24:49.720239342 +0000 UTC m=+88.443337617" watchObservedRunningTime="2026-02-01 07:24:49.720831087 +0000 UTC m=+88.443929342" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.755119 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.755620 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.755897 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-service-ca\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.756255 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 
01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.756517 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.818431 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-k6xtw" podStartSLOduration=67.818402495 podStartE2EDuration="1m7.818402495s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:24:49.817433441 +0000 UTC m=+88.540531726" watchObservedRunningTime="2026-02-01 07:24:49.818402495 +0000 UTC m=+88.541500780" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.855089 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=67.85505636 podStartE2EDuration="1m7.85505636s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:24:49.853708006 +0000 UTC m=+88.576806271" watchObservedRunningTime="2026-02-01 07:24:49.85505636 +0000 UTC m=+88.578154645" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.857527 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.857829 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.857862 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.857905 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-service-ca\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.857950 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-kube-api-access\") 
pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.858570 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.860168 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-service-ca\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.860423 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.875871 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=66.8758313 podStartE2EDuration="1m6.8758313s" podCreationTimestamp="2026-02-01 07:23:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:24:49.874690981 +0000 UTC m=+88.597789256" watchObservedRunningTime="2026-02-01 07:24:49.8758313 +0000 UTC m=+88.598929575" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.884157 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.890575 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3e956e84-1ed8-43f6-8a64-3ac18a2db5bb-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-7h8xq\" (UID: \"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.900806 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-nlgpt" podStartSLOduration=67.900789327 podStartE2EDuration="1m7.900789327s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:24:49.900042408 +0000 UTC m=+88.623140683" watchObservedRunningTime="2026-02-01 07:24:49.900789327 +0000 UTC m=+88.623887592" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.917169 4650 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podStartSLOduration=67.917139164 podStartE2EDuration="1m7.917139164s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:24:49.915732408 +0000 UTC m=+88.638830653" watchObservedRunningTime="2026-02-01 07:24:49.917139164 +0000 UTC m=+88.640237409" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.946609 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 08:06:40.595750812 +0000 UTC Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.946981 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.959746 4650 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.965246 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.965296 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:49 crc kubenswrapper[4650]: E0201 07:24:49.965393 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.965471 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:49 crc kubenswrapper[4650]: E0201 07:24:49.965593 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:49 crc kubenswrapper[4650]: E0201 07:24:49.965664 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:49 crc kubenswrapper[4650]: I0201 07:24:49.991648 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=15.991630074 podStartE2EDuration="15.991630074s" podCreationTimestamp="2026-02-01 07:24:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:24:49.972253039 +0000 UTC m=+88.695351324" watchObservedRunningTime="2026-02-01 07:24:49.991630074 +0000 UTC m=+88.714728329" Feb 01 07:24:50 crc kubenswrapper[4650]: I0201 07:24:50.005778 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" Feb 01 07:24:50 crc kubenswrapper[4650]: I0201 07:24:50.018770 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-bvkr8" podStartSLOduration=68.018754125 podStartE2EDuration="1m8.018754125s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:24:50.015513263 +0000 UTC m=+88.738611518" watchObservedRunningTime="2026-02-01 07:24:50.018754125 +0000 UTC m=+88.741852380" Feb 01 07:24:50 crc kubenswrapper[4650]: W0201 07:24:50.030320 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e956e84_1ed8_43f6_8a64_3ac18a2db5bb.slice/crio-1241aa469dcc945bd4691105d472baf3e63ca180a1edd2f047382c6e495b85c9 WatchSource:0}: Error finding container 1241aa469dcc945bd4691105d472baf3e63ca180a1edd2f047382c6e495b85c9: Status 404 returned error can't find the container with id 1241aa469dcc945bd4691105d472baf3e63ca180a1edd2f047382c6e495b85c9 Feb 01 07:24:50 crc kubenswrapper[4650]: I0201 07:24:50.062705 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-gz868" podStartSLOduration=68.062675806 podStartE2EDuration="1m8.062675806s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:24:50.042949983 +0000 UTC m=+88.766048228" watchObservedRunningTime="2026-02-01 07:24:50.062675806 +0000 UTC m=+88.785774081" Feb 01 07:24:50 crc kubenswrapper[4650]: I0201 07:24:50.087932 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-mscbj" podStartSLOduration=67.087903839 podStartE2EDuration="1m7.087903839s" podCreationTimestamp="2026-02-01 07:23:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:24:50.07421143 +0000 UTC m=+88.797309705" watchObservedRunningTime="2026-02-01 07:24:50.087903839 +0000 UTC m=+88.811002094" Feb 01 07:24:50 crc kubenswrapper[4650]: I0201 07:24:50.125684 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=32.125653562 podStartE2EDuration="32.125653562s" podCreationTimestamp="2026-02-01 07:24:18 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:24:50.111872211 +0000 UTC m=+88.834970466" watchObservedRunningTime="2026-02-01 07:24:50.125653562 +0000 UTC m=+88.848751807" Feb 01 07:24:50 crc kubenswrapper[4650]: I0201 07:24:50.648241 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" event={"ID":"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb","Type":"ContainerStarted","Data":"c53c904243acdc6744a7215a8ab1ff310ebf8125512d7641517a405f264f19f5"} Feb 01 07:24:50 crc kubenswrapper[4650]: I0201 07:24:50.648898 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" event={"ID":"3e956e84-1ed8-43f6-8a64-3ac18a2db5bb","Type":"ContainerStarted","Data":"1241aa469dcc945bd4691105d472baf3e63ca180a1edd2f047382c6e495b85c9"} Feb 01 07:24:50 crc kubenswrapper[4650]: I0201 07:24:50.965169 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:50 crc kubenswrapper[4650]: E0201 07:24:50.965387 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:51 crc kubenswrapper[4650]: I0201 07:24:51.965096 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:51 crc kubenswrapper[4650]: I0201 07:24:51.965183 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:51 crc kubenswrapper[4650]: I0201 07:24:51.966165 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:51 crc kubenswrapper[4650]: E0201 07:24:51.967146 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:51 crc kubenswrapper[4650]: E0201 07:24:51.967298 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:51 crc kubenswrapper[4650]: E0201 07:24:51.967434 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:52 crc kubenswrapper[4650]: I0201 07:24:52.964316 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:52 crc kubenswrapper[4650]: E0201 07:24:52.964727 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:53 crc kubenswrapper[4650]: I0201 07:24:53.965158 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:53 crc kubenswrapper[4650]: I0201 07:24:53.965203 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:53 crc kubenswrapper[4650]: E0201 07:24:53.965348 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:53 crc kubenswrapper[4650]: E0201 07:24:53.965496 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:53 crc kubenswrapper[4650]: I0201 07:24:53.966608 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:53 crc kubenswrapper[4650]: E0201 07:24:53.966889 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:54 crc kubenswrapper[4650]: I0201 07:24:54.965601 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:54 crc kubenswrapper[4650]: E0201 07:24:54.966205 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:55 crc kubenswrapper[4650]: I0201 07:24:55.965330 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:55 crc kubenswrapper[4650]: I0201 07:24:55.965480 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:55 crc kubenswrapper[4650]: I0201 07:24:55.965382 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:55 crc kubenswrapper[4650]: E0201 07:24:55.965623 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:55 crc kubenswrapper[4650]: E0201 07:24:55.965770 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:55 crc kubenswrapper[4650]: E0201 07:24:55.966020 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:56 crc kubenswrapper[4650]: I0201 07:24:56.965281 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:56 crc kubenswrapper[4650]: E0201 07:24:56.966001 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:56 crc kubenswrapper[4650]: I0201 07:24:56.966354 4650 scope.go:117] "RemoveContainer" containerID="30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28" Feb 01 07:24:56 crc kubenswrapper[4650]: E0201 07:24:56.966582 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" Feb 01 07:24:57 crc kubenswrapper[4650]: I0201 07:24:57.965219 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:57 crc kubenswrapper[4650]: I0201 07:24:57.965352 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:57 crc kubenswrapper[4650]: E0201 07:24:57.965456 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:24:57 crc kubenswrapper[4650]: E0201 07:24:57.965564 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:57 crc kubenswrapper[4650]: I0201 07:24:57.965261 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:57 crc kubenswrapper[4650]: E0201 07:24:57.965713 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:58 crc kubenswrapper[4650]: I0201 07:24:58.964975 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:24:58 crc kubenswrapper[4650]: E0201 07:24:58.965265 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:24:59 crc kubenswrapper[4650]: I0201 07:24:59.965135 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:24:59 crc kubenswrapper[4650]: I0201 07:24:59.965247 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:24:59 crc kubenswrapper[4650]: I0201 07:24:59.965266 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:24:59 crc kubenswrapper[4650]: E0201 07:24:59.965486 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:24:59 crc kubenswrapper[4650]: E0201 07:24:59.965695 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:24:59 crc kubenswrapper[4650]: E0201 07:24:59.966220 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:00 crc kubenswrapper[4650]: I0201 07:25:00.964872 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:00 crc kubenswrapper[4650]: E0201 07:25:00.965145 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:01 crc kubenswrapper[4650]: I0201 07:25:01.618711 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:01 crc kubenswrapper[4650]: E0201 07:25:01.619018 4650 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:25:01 crc kubenswrapper[4650]: E0201 07:25:01.619165 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs podName:f4593d40-c6e1-42fa-8c18-053ff31304b3 nodeName:}" failed. No retries permitted until 2026-02-01 07:26:05.619136136 +0000 UTC m=+164.342234411 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs") pod "network-metrics-daemon-jvgsf" (UID: "f4593d40-c6e1-42fa-8c18-053ff31304b3") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 01 07:25:01 crc kubenswrapper[4650]: I0201 07:25:01.964790 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:01 crc kubenswrapper[4650]: I0201 07:25:01.964809 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:01 crc kubenswrapper[4650]: I0201 07:25:01.964848 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:01 crc kubenswrapper[4650]: E0201 07:25:01.965960 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:01 crc kubenswrapper[4650]: E0201 07:25:01.966044 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:01 crc kubenswrapper[4650]: E0201 07:25:01.966155 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:02 crc kubenswrapper[4650]: I0201 07:25:02.965885 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:02 crc kubenswrapper[4650]: E0201 07:25:02.966148 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:03 crc kubenswrapper[4650]: I0201 07:25:03.965095 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:03 crc kubenswrapper[4650]: I0201 07:25:03.965161 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:03 crc kubenswrapper[4650]: I0201 07:25:03.965096 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:03 crc kubenswrapper[4650]: E0201 07:25:03.965381 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:03 crc kubenswrapper[4650]: E0201 07:25:03.965529 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:03 crc kubenswrapper[4650]: E0201 07:25:03.965718 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:04 crc kubenswrapper[4650]: I0201 07:25:04.964861 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:04 crc kubenswrapper[4650]: E0201 07:25:04.965137 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:05 crc kubenswrapper[4650]: I0201 07:25:05.965077 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:05 crc kubenswrapper[4650]: I0201 07:25:05.965181 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:05 crc kubenswrapper[4650]: I0201 07:25:05.965071 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:05 crc kubenswrapper[4650]: E0201 07:25:05.965317 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:05 crc kubenswrapper[4650]: E0201 07:25:05.965520 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:05 crc kubenswrapper[4650]: E0201 07:25:05.965779 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:06 crc kubenswrapper[4650]: I0201 07:25:06.964904 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:06 crc kubenswrapper[4650]: E0201 07:25:06.965220 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:07 crc kubenswrapper[4650]: I0201 07:25:07.964994 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:07 crc kubenswrapper[4650]: I0201 07:25:07.965146 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:07 crc kubenswrapper[4650]: E0201 07:25:07.965284 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:07 crc kubenswrapper[4650]: I0201 07:25:07.964999 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:07 crc kubenswrapper[4650]: E0201 07:25:07.965815 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:07 crc kubenswrapper[4650]: E0201 07:25:07.966070 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:07 crc kubenswrapper[4650]: I0201 07:25:07.966321 4650 scope.go:117] "RemoveContainer" containerID="30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28" Feb 01 07:25:07 crc kubenswrapper[4650]: E0201 07:25:07.966618 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hm5cs_openshift-ovn-kubernetes(ef0e87ea-6edd-4e89-a09b-01f62f763ba1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" Feb 01 07:25:08 crc kubenswrapper[4650]: I0201 07:25:08.964970 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:08 crc kubenswrapper[4650]: E0201 07:25:08.965244 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:09 crc kubenswrapper[4650]: I0201 07:25:09.964917 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:09 crc kubenswrapper[4650]: I0201 07:25:09.964928 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:09 crc kubenswrapper[4650]: I0201 07:25:09.965162 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:09 crc kubenswrapper[4650]: E0201 07:25:09.965323 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:09 crc kubenswrapper[4650]: E0201 07:25:09.965558 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:09 crc kubenswrapper[4650]: E0201 07:25:09.966021 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:10 crc kubenswrapper[4650]: I0201 07:25:10.964162 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:10 crc kubenswrapper[4650]: E0201 07:25:10.964350 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:11 crc kubenswrapper[4650]: I0201 07:25:11.964574 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:11 crc kubenswrapper[4650]: I0201 07:25:11.964574 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:11 crc kubenswrapper[4650]: I0201 07:25:11.966255 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:11 crc kubenswrapper[4650]: E0201 07:25:11.966727 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:11 crc kubenswrapper[4650]: E0201 07:25:11.966914 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:11 crc kubenswrapper[4650]: E0201 07:25:11.967022 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:12 crc kubenswrapper[4650]: I0201 07:25:12.964723 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:12 crc kubenswrapper[4650]: E0201 07:25:12.966000 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:13 crc kubenswrapper[4650]: I0201 07:25:13.964785 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:13 crc kubenswrapper[4650]: I0201 07:25:13.964863 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:13 crc kubenswrapper[4650]: I0201 07:25:13.964785 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:13 crc kubenswrapper[4650]: E0201 07:25:13.965007 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:13 crc kubenswrapper[4650]: E0201 07:25:13.965147 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:13 crc kubenswrapper[4650]: E0201 07:25:13.965260 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:14 crc kubenswrapper[4650]: I0201 07:25:14.964815 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:14 crc kubenswrapper[4650]: E0201 07:25:14.965088 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:15 crc kubenswrapper[4650]: I0201 07:25:15.965007 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:15 crc kubenswrapper[4650]: I0201 07:25:15.965091 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:15 crc kubenswrapper[4650]: I0201 07:25:15.965007 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:15 crc kubenswrapper[4650]: E0201 07:25:15.965272 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:15 crc kubenswrapper[4650]: E0201 07:25:15.965439 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:15 crc kubenswrapper[4650]: E0201 07:25:15.965717 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:16 crc kubenswrapper[4650]: I0201 07:25:16.964209 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:16 crc kubenswrapper[4650]: E0201 07:25:16.964393 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:17 crc kubenswrapper[4650]: I0201 07:25:17.785409 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-k6xtw_e408ebb2-07fc-4317-92d4-1316ece830fb/kube-multus/1.log" Feb 01 07:25:17 crc kubenswrapper[4650]: I0201 07:25:17.787312 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-k6xtw_e408ebb2-07fc-4317-92d4-1316ece830fb/kube-multus/0.log" Feb 01 07:25:17 crc kubenswrapper[4650]: I0201 07:25:17.787423 4650 generic.go:334] "Generic (PLEG): container finished" podID="e408ebb2-07fc-4317-92d4-1316ece830fb" containerID="c3bba94e2fc70e50b46639439a12b34db68a19e8dd937f5f2cdad28f0a7ac012" exitCode=1 Feb 01 07:25:17 crc kubenswrapper[4650]: I0201 07:25:17.787509 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-k6xtw" event={"ID":"e408ebb2-07fc-4317-92d4-1316ece830fb","Type":"ContainerDied","Data":"c3bba94e2fc70e50b46639439a12b34db68a19e8dd937f5f2cdad28f0a7ac012"} Feb 01 07:25:17 crc kubenswrapper[4650]: I0201 07:25:17.787615 4650 scope.go:117] "RemoveContainer" containerID="36dc92f6e3a91e2798036023915ee8fbaf2ec4f12d26f8880597d4ff5ab23b98" Feb 01 07:25:17 crc kubenswrapper[4650]: I0201 07:25:17.788817 4650 scope.go:117] "RemoveContainer" containerID="c3bba94e2fc70e50b46639439a12b34db68a19e8dd937f5f2cdad28f0a7ac012" Feb 01 07:25:17 crc kubenswrapper[4650]: E0201 07:25:17.789204 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-k6xtw_openshift-multus(e408ebb2-07fc-4317-92d4-1316ece830fb)\"" pod="openshift-multus/multus-k6xtw" podUID="e408ebb2-07fc-4317-92d4-1316ece830fb" Feb 01 07:25:17 crc kubenswrapper[4650]: I0201 07:25:17.820511 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7h8xq" podStartSLOduration=95.820472538 podStartE2EDuration="1m35.820472538s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:24:50.669210266 +0000 UTC m=+89.392308521" watchObservedRunningTime="2026-02-01 07:25:17.820472538 +0000 UTC m=+116.543570823" Feb 01 07:25:17 crc kubenswrapper[4650]: I0201 07:25:17.964728 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:17 crc kubenswrapper[4650]: I0201 07:25:17.964789 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:17 crc kubenswrapper[4650]: E0201 07:25:17.964884 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:17 crc kubenswrapper[4650]: I0201 07:25:17.964991 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:17 crc kubenswrapper[4650]: E0201 07:25:17.965268 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:17 crc kubenswrapper[4650]: E0201 07:25:17.965414 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:18 crc kubenswrapper[4650]: I0201 07:25:18.794325 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-k6xtw_e408ebb2-07fc-4317-92d4-1316ece830fb/kube-multus/1.log" Feb 01 07:25:18 crc kubenswrapper[4650]: I0201 07:25:18.965158 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:18 crc kubenswrapper[4650]: E0201 07:25:18.965407 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:19 crc kubenswrapper[4650]: I0201 07:25:19.964406 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:19 crc kubenswrapper[4650]: I0201 07:25:19.964467 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:19 crc kubenswrapper[4650]: I0201 07:25:19.964467 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:19 crc kubenswrapper[4650]: E0201 07:25:19.964633 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:19 crc kubenswrapper[4650]: E0201 07:25:19.964864 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:19 crc kubenswrapper[4650]: E0201 07:25:19.964960 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:20 crc kubenswrapper[4650]: I0201 07:25:20.964958 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:20 crc kubenswrapper[4650]: E0201 07:25:20.966175 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:21 crc kubenswrapper[4650]: E0201 07:25:21.894087 4650 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Feb 01 07:25:21 crc kubenswrapper[4650]: I0201 07:25:21.964549 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:21 crc kubenswrapper[4650]: I0201 07:25:21.964635 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:21 crc kubenswrapper[4650]: I0201 07:25:21.964644 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:21 crc kubenswrapper[4650]: E0201 07:25:21.966294 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:21 crc kubenswrapper[4650]: E0201 07:25:21.966655 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:21 crc kubenswrapper[4650]: E0201 07:25:21.966889 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:22 crc kubenswrapper[4650]: E0201 07:25:22.051126 4650 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 01 07:25:22 crc kubenswrapper[4650]: I0201 07:25:22.965278 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:22 crc kubenswrapper[4650]: E0201 07:25:22.965561 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:22 crc kubenswrapper[4650]: I0201 07:25:22.966116 4650 scope.go:117] "RemoveContainer" containerID="30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28" Feb 01 07:25:23 crc kubenswrapper[4650]: I0201 07:25:23.815340 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/3.log" Feb 01 07:25:23 crc kubenswrapper[4650]: I0201 07:25:23.819029 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerStarted","Data":"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e"} Feb 01 07:25:23 crc kubenswrapper[4650]: I0201 07:25:23.819441 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:25:23 crc kubenswrapper[4650]: I0201 07:25:23.965259 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:23 crc kubenswrapper[4650]: I0201 07:25:23.965310 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:23 crc kubenswrapper[4650]: I0201 07:25:23.965484 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:23 crc kubenswrapper[4650]: E0201 07:25:23.965608 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:23 crc kubenswrapper[4650]: E0201 07:25:23.965753 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:23 crc kubenswrapper[4650]: E0201 07:25:23.965964 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:23 crc kubenswrapper[4650]: I0201 07:25:23.985690 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podStartSLOduration=101.985667168 podStartE2EDuration="1m41.985667168s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:23.848162751 +0000 UTC m=+122.571260996" watchObservedRunningTime="2026-02-01 07:25:23.985667168 +0000 UTC m=+122.708765413" Feb 01 07:25:23 crc kubenswrapper[4650]: I0201 07:25:23.986183 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-jvgsf"] Feb 01 07:25:24 crc kubenswrapper[4650]: I0201 07:25:24.823721 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:24 crc kubenswrapper[4650]: E0201 07:25:24.824753 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:24 crc kubenswrapper[4650]: I0201 07:25:24.965166 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:24 crc kubenswrapper[4650]: E0201 07:25:24.965377 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:25 crc kubenswrapper[4650]: I0201 07:25:25.965089 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:25 crc kubenswrapper[4650]: I0201 07:25:25.965106 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:25 crc kubenswrapper[4650]: I0201 07:25:25.966151 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:25 crc kubenswrapper[4650]: E0201 07:25:25.966359 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:25 crc kubenswrapper[4650]: E0201 07:25:25.966601 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:25 crc kubenswrapper[4650]: E0201 07:25:25.966684 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:26 crc kubenswrapper[4650]: I0201 07:25:26.964578 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:26 crc kubenswrapper[4650]: E0201 07:25:26.964777 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:27 crc kubenswrapper[4650]: E0201 07:25:27.052619 4650 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 01 07:25:27 crc kubenswrapper[4650]: I0201 07:25:27.964834 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:27 crc kubenswrapper[4650]: E0201 07:25:27.965149 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:27 crc kubenswrapper[4650]: I0201 07:25:27.965566 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:27 crc kubenswrapper[4650]: I0201 07:25:27.965678 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:27 crc kubenswrapper[4650]: E0201 07:25:27.966159 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:27 crc kubenswrapper[4650]: E0201 07:25:27.966271 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:28 crc kubenswrapper[4650]: I0201 07:25:28.965160 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:28 crc kubenswrapper[4650]: E0201 07:25:28.965388 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:29 crc kubenswrapper[4650]: I0201 07:25:29.964524 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:29 crc kubenswrapper[4650]: I0201 07:25:29.964524 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:29 crc kubenswrapper[4650]: E0201 07:25:29.964787 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:29 crc kubenswrapper[4650]: E0201 07:25:29.964926 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:29 crc kubenswrapper[4650]: I0201 07:25:29.964551 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:29 crc kubenswrapper[4650]: E0201 07:25:29.965118 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:30 crc kubenswrapper[4650]: I0201 07:25:30.964500 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:30 crc kubenswrapper[4650]: E0201 07:25:30.965215 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:31 crc kubenswrapper[4650]: I0201 07:25:31.964550 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:31 crc kubenswrapper[4650]: I0201 07:25:31.964637 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:31 crc kubenswrapper[4650]: I0201 07:25:31.964799 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:31 crc kubenswrapper[4650]: E0201 07:25:31.966985 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:31 crc kubenswrapper[4650]: E0201 07:25:31.967106 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:31 crc kubenswrapper[4650]: E0201 07:25:31.967258 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:31 crc kubenswrapper[4650]: I0201 07:25:31.967309 4650 scope.go:117] "RemoveContainer" containerID="c3bba94e2fc70e50b46639439a12b34db68a19e8dd937f5f2cdad28f0a7ac012" Feb 01 07:25:32 crc kubenswrapper[4650]: E0201 07:25:32.060656 4650 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 01 07:25:32 crc kubenswrapper[4650]: I0201 07:25:32.859710 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-k6xtw_e408ebb2-07fc-4317-92d4-1316ece830fb/kube-multus/1.log" Feb 01 07:25:32 crc kubenswrapper[4650]: I0201 07:25:32.859779 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-k6xtw" event={"ID":"e408ebb2-07fc-4317-92d4-1316ece830fb","Type":"ContainerStarted","Data":"18d71ea0d1e0ca8b54e4bd06f8df0d55bbe23fe80bbaf025dcf0468ad1399f99"} Feb 01 07:25:32 crc kubenswrapper[4650]: I0201 07:25:32.964911 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:32 crc kubenswrapper[4650]: E0201 07:25:32.965168 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:33 crc kubenswrapper[4650]: I0201 07:25:33.964580 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:33 crc kubenswrapper[4650]: I0201 07:25:33.964611 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:33 crc kubenswrapper[4650]: I0201 07:25:33.964580 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:33 crc kubenswrapper[4650]: E0201 07:25:33.964753 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:33 crc kubenswrapper[4650]: E0201 07:25:33.964870 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:33 crc kubenswrapper[4650]: E0201 07:25:33.965095 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:34 crc kubenswrapper[4650]: I0201 07:25:34.965000 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:34 crc kubenswrapper[4650]: E0201 07:25:34.965408 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:35 crc kubenswrapper[4650]: I0201 07:25:35.964653 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:35 crc kubenswrapper[4650]: I0201 07:25:35.964710 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:35 crc kubenswrapper[4650]: I0201 07:25:35.964736 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:35 crc kubenswrapper[4650]: E0201 07:25:35.964977 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jvgsf" podUID="f4593d40-c6e1-42fa-8c18-053ff31304b3" Feb 01 07:25:35 crc kubenswrapper[4650]: E0201 07:25:35.965238 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 01 07:25:35 crc kubenswrapper[4650]: E0201 07:25:35.965417 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 01 07:25:36 crc kubenswrapper[4650]: I0201 07:25:36.964633 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:36 crc kubenswrapper[4650]: E0201 07:25:36.964797 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 01 07:25:37 crc kubenswrapper[4650]: I0201 07:25:37.964346 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:37 crc kubenswrapper[4650]: I0201 07:25:37.964459 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:37 crc kubenswrapper[4650]: I0201 07:25:37.964680 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:25:37 crc kubenswrapper[4650]: I0201 07:25:37.975901 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Feb 01 07:25:37 crc kubenswrapper[4650]: I0201 07:25:37.976076 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Feb 01 07:25:37 crc kubenswrapper[4650]: I0201 07:25:37.976330 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Feb 01 07:25:37 crc kubenswrapper[4650]: I0201 07:25:37.976458 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Feb 01 07:25:37 crc kubenswrapper[4650]: I0201 07:25:37.976490 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Feb 01 07:25:37 crc kubenswrapper[4650]: I0201 07:25:37.976676 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Feb 01 07:25:38 crc kubenswrapper[4650]: I0201 07:25:38.964627 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.418348 4650 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.474270 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-24q9r"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.475421 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.477479 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5fhkk"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.478288 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.479352 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.480175 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.480384 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-sj7gj"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.481130 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.481284 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.481327 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.489918 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.500373 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.500548 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.500607 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.501459 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.509389 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-tfv5c"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.511778 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.515482 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.515785 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.515983 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.518377 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.518713 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.524286 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.525323 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.526073 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.528390 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.529169 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.529381 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.529582 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.531944 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.532190 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.532392 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.532564 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.534716 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.534750 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.535191 4650 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication-operator"/"authentication-operator-config" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.535258 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.535517 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.535790 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.535904 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.536059 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.536416 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.536641 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.536867 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.537408 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.537540 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539187 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-config\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539222 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-serving-cert\") pod \"route-controller-manager-6576b87f9c-5rj7j\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539251 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e5471807-9088-4007-b0dc-b68760e76415-audit-dir\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539279 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbcgl\" (UniqueName: 
\"kubernetes.io/projected/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-kube-api-access-pbcgl\") pod \"route-controller-manager-6576b87f9c-5rj7j\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539298 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6568a223-ba53-4690-9378-08b043d9db27-serving-cert\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539318 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e5471807-9088-4007-b0dc-b68760e76415-etcd-client\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539334 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-client-ca\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539352 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e5471807-9088-4007-b0dc-b68760e76415-node-pullsecrets\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539367 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-audit\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539385 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsd4f\" (UniqueName: \"kubernetes.io/projected/e5471807-9088-4007-b0dc-b68760e76415-kube-api-access-nsd4f\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539407 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jj6ds\" (UniqueName: \"kubernetes.io/projected/6568a223-ba53-4690-9378-08b043d9db27-kube-api-access-jj6ds\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539422 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5fhkk\" 
(UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539438 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-client-ca\") pod \"route-controller-manager-6576b87f9c-5rj7j\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539462 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-etcd-serving-ca\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539480 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-config\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539497 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-image-import-ca\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539527 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d720972-b11e-48ba-a5ee-9ceef5808130-config\") pod \"machine-api-operator-5694c8668f-sj7gj\" (UID: \"6d720972-b11e-48ba-a5ee-9ceef5808130\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539546 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e5471807-9088-4007-b0dc-b68760e76415-encryption-config\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539564 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e5471807-9088-4007-b0dc-b68760e76415-serving-cert\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539580 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-config\") pod \"route-controller-manager-6576b87f9c-5rj7j\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:40 crc kubenswrapper[4650]: 
I0201 07:25:40.539598 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dj9x8\" (UniqueName: \"kubernetes.io/projected/6d720972-b11e-48ba-a5ee-9ceef5808130-kube-api-access-dj9x8\") pod \"machine-api-operator-5694c8668f-sj7gj\" (UID: \"6d720972-b11e-48ba-a5ee-9ceef5808130\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539616 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6d720972-b11e-48ba-a5ee-9ceef5808130-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-sj7gj\" (UID: \"6d720972-b11e-48ba-a5ee-9ceef5808130\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539635 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-trusted-ca-bundle\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539663 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6d720972-b11e-48ba-a5ee-9ceef5808130-images\") pod \"machine-api-operator-5694c8668f-sj7gj\" (UID: \"6d720972-b11e-48ba-a5ee-9ceef5808130\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539718 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.539959 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.540152 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.540405 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.540456 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.540920 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.541413 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.541582 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.541717 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.541714 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.541919 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.542339 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-xql62"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.542997 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.550721 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.551388 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.551603 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.551716 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.551801 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.551961 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.551985 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.552181 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.552348 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.552442 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 
07:25:40.552538 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.552832 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-s4bcm"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.553377 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-r65td"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.553939 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.555990 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-snf8v"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.557143 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.557829 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.558001 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.558080 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.559005 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.559878 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.561292 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.561698 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.562104 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.562676 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-xfg9f"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.563279 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-nnskb"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.563726 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-nnskb" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.564074 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-xfg9f" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.573516 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.574232 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.576308 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.577707 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.577861 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.578921 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.579338 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.579470 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.579798 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.580115 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.580891 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.581977 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-jxwb5"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.589786 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-crkwn"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.598876 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.612591 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jxwb5" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.632932 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.633560 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.633834 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.633881 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.633909 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.634087 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.634162 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.634229 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.634235 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-q9fms"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.634320 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.634712 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.634837 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.635241 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.635550 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.635833 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.634168 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.635969 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.635984 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.636119 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.636131 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.636228 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.636321 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.636328 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.636392 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.636919 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.637152 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.637271 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.637357 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.637439 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.637536 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.637711 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.637795 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.637923 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.638242 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.638347 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.641106 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.641764 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/138bf70b-dbf6-41a8-8595-6451279e8080-config\") pod \"openshift-apiserver-operator-796bbdcf4f-q4psv\" (UID: \"138bf70b-dbf6-41a8-8595-6451279e8080\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.641807 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.641840 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e5471807-9088-4007-b0dc-b68760e76415-audit-dir\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.641858 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-etcd-client\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.641880 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/416589cc-479e-45e0-8fad-2ccd30115769-audit-dir\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.641976 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e5471807-9088-4007-b0dc-b68760e76415-audit-dir\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.642548 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.642914 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbcgl\" (UniqueName: \"kubernetes.io/projected/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-kube-api-access-pbcgl\") pod \"route-controller-manager-6576b87f9c-5rj7j\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.642950 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.642991 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9v84t\" (UniqueName: \"kubernetes.io/projected/416589cc-479e-45e0-8fad-2ccd30115769-kube-api-access-9v84t\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643015 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acd8745a-8f0b-4589-be83-b3673735409e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2rsm\" (UID: \"acd8745a-8f0b-4589-be83-b3673735409e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643066 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jjc7\" (UniqueName: \"kubernetes.io/projected/77633ea9-7071-47d2-a623-708e4cd5b99f-kube-api-access-7jjc7\") pod \"dns-operator-744455d44c-nnskb\" (UID: \"77633ea9-7071-47d2-a623-708e4cd5b99f\") " pod="openshift-dns-operator/dns-operator-744455d44c-nnskb" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643087 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6568a223-ba53-4690-9378-08b043d9db27-serving-cert\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643125 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643147 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/138bf70b-dbf6-41a8-8595-6451279e8080-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-q4psv\" (UID: \"138bf70b-dbf6-41a8-8595-6451279e8080\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643168 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643209 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e5471807-9088-4007-b0dc-b68760e76415-etcd-client\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643229 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/acd8745a-8f0b-4589-be83-b3673735409e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2rsm\" (UID: \"acd8745a-8f0b-4589-be83-b3673735409e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643250 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-client-ca\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643290 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2298718f-d9f4-4714-acbb-01739d0c7b62-console-oauth-config\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643310 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-audit-policies\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643327 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkphg\" (UniqueName: \"kubernetes.io/projected/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-kube-api-access-hkphg\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643366 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/77633ea9-7071-47d2-a623-708e4cd5b99f-metrics-tls\") pod \"dns-operator-744455d44c-nnskb\" (UID: \"77633ea9-7071-47d2-a623-708e4cd5b99f\") " pod="openshift-dns-operator/dns-operator-744455d44c-nnskb" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643386 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e5471807-9088-4007-b0dc-b68760e76415-node-pullsecrets\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643404 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-audit\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643445 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643465 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsd4f\" (UniqueName: \"kubernetes.io/projected/e5471807-9088-4007-b0dc-b68760e76415-kube-api-access-nsd4f\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643485 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jj6ds\" (UniqueName: \"kubernetes.io/projected/6568a223-ba53-4690-9378-08b043d9db27-kube-api-access-jj6ds\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643524 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-config\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643543 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-serving-cert\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643561 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vchvp\" (UniqueName: \"kubernetes.io/projected/a09c591f-ba12-43d6-98bf-003df4aa5813-kube-api-access-vchvp\") pod \"openshift-config-operator-7777fb866f-xql62\" (UID: \"a09c591f-ba12-43d6-98bf-003df4aa5813\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643597 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdn56\" (UniqueName: \"kubernetes.io/projected/776aae02-31ea-4a89-afa0-11a2bd798df2-kube-api-access-rdn56\") pod \"machine-approver-56656f9798-r8xbn\" (UID: \"776aae02-31ea-4a89-afa0-11a2bd798df2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643619 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643639 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-trusted-ca-bundle\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643674 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643697 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-client-ca\") pod \"route-controller-manager-6576b87f9c-5rj7j\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643714 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a09c591f-ba12-43d6-98bf-003df4aa5813-serving-cert\") pod \"openshift-config-operator-7777fb866f-xql62\" (UID: \"a09c591f-ba12-43d6-98bf-003df4aa5813\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643754 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-serving-cert\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643785 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-encryption-config\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643804 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643843 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jv2d\" (UniqueName: \"kubernetes.io/projected/169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4-kube-api-access-4jv2d\") pod \"downloads-7954f5f757-xfg9f\" (UID: \"169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4\") " pod="openshift-console/downloads-7954f5f757-xfg9f" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643861 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9kpv\" (UniqueName: 
\"kubernetes.io/projected/6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f-kube-api-access-d9kpv\") pod \"ingress-operator-5b745b69d9-rkvl7\" (UID: \"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643879 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-etcd-serving-ca\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643918 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643938 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-config\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643955 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/a09c591f-ba12-43d6-98bf-003df4aa5813-available-featuregates\") pod \"openshift-config-operator-7777fb866f-xql62\" (UID: \"a09c591f-ba12-43d6-98bf-003df4aa5813\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.643991 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644009 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-audit-dir\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644054 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-audit-policies\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644073 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-image-import-ca\") pod \"apiserver-76f77b778f-24q9r\" (UID: 
\"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644091 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644152 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d720972-b11e-48ba-a5ee-9ceef5808130-config\") pod \"machine-api-operator-5694c8668f-sj7gj\" (UID: \"6d720972-b11e-48ba-a5ee-9ceef5808130\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644172 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wx5nn\" (UniqueName: \"kubernetes.io/projected/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-kube-api-access-wx5nn\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644209 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f-bound-sa-token\") pod \"ingress-operator-5b745b69d9-rkvl7\" (UID: \"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644232 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-console-config\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644250 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644267 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-service-ca-bundle\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644307 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e5471807-9088-4007-b0dc-b68760e76415-encryption-config\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 
07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644324 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644345 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e5471807-9088-4007-b0dc-b68760e76415-serving-cert\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644383 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2298718f-d9f4-4714-acbb-01739d0c7b62-console-serving-cert\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644402 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndtd6\" (UniqueName: \"kubernetes.io/projected/2298718f-d9f4-4714-acbb-01739d0c7b62-kube-api-access-ndtd6\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644420 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-config\") pod \"route-controller-manager-6576b87f9c-5rj7j\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644457 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-service-ca\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644475 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-oauth-serving-cert\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644493 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dj9x8\" (UniqueName: \"kubernetes.io/projected/6d720972-b11e-48ba-a5ee-9ceef5808130-kube-api-access-dj9x8\") pod \"machine-api-operator-5694c8668f-sj7gj\" (UID: \"6d720972-b11e-48ba-a5ee-9ceef5808130\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644528 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" 
(UniqueName: \"kubernetes.io/configmap/776aae02-31ea-4a89-afa0-11a2bd798df2-auth-proxy-config\") pod \"machine-approver-56656f9798-r8xbn\" (UID: \"776aae02-31ea-4a89-afa0-11a2bd798df2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644550 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72rlx\" (UniqueName: \"kubernetes.io/projected/acd8745a-8f0b-4589-be83-b3673735409e-kube-api-access-72rlx\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2rsm\" (UID: \"acd8745a-8f0b-4589-be83-b3673735409e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644568 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hg4v7\" (UniqueName: \"kubernetes.io/projected/42bb4278-3f91-4824-ba06-22af4099f7e4-kube-api-access-hg4v7\") pod \"migrator-59844c95c7-jxwb5\" (UID: \"42bb4278-3f91-4824-ba06-22af4099f7e4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jxwb5" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644606 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6d720972-b11e-48ba-a5ee-9ceef5808130-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-sj7gj\" (UID: \"6d720972-b11e-48ba-a5ee-9ceef5808130\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644628 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-trusted-ca-bundle\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644647 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/776aae02-31ea-4a89-afa0-11a2bd798df2-config\") pod \"machine-approver-56656f9798-r8xbn\" (UID: \"776aae02-31ea-4a89-afa0-11a2bd798df2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644697 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6d720972-b11e-48ba-a5ee-9ceef5808130-images\") pod \"machine-api-operator-5694c8668f-sj7gj\" (UID: \"6d720972-b11e-48ba-a5ee-9ceef5808130\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644719 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644739 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f-metrics-tls\") pod \"ingress-operator-5b745b69d9-rkvl7\" (UID: \"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644777 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f-trusted-ca\") pod \"ingress-operator-5b745b69d9-rkvl7\" (UID: \"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644795 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/776aae02-31ea-4a89-afa0-11a2bd798df2-machine-approver-tls\") pod \"machine-approver-56656f9798-r8xbn\" (UID: \"776aae02-31ea-4a89-afa0-11a2bd798df2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644813 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644859 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-config\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644876 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-serving-cert\") pod \"route-controller-manager-6576b87f9c-5rj7j\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.644895 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdv5d\" (UniqueName: \"kubernetes.io/projected/138bf70b-dbf6-41a8-8595-6451279e8080-kube-api-access-zdv5d\") pod \"openshift-apiserver-operator-796bbdcf4f-q4psv\" (UID: \"138bf70b-dbf6-41a8-8595-6451279e8080\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.647008 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.648870 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.649666 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.650094 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"client-ca\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-client-ca\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.650183 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e5471807-9088-4007-b0dc-b68760e76415-node-pullsecrets\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.650904 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-audit\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.650925 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.652000 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.652189 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.652423 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.653193 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-client-ca\") pod \"route-controller-manager-6576b87f9c-5rj7j\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.654820 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-etcd-serving-ca\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.656332 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.660758 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.663462 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.663585 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.664285 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6568a223-ba53-4690-9378-08b043d9db27-serving-cert\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.664476 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-image-import-ca\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.664535 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.664904 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d720972-b11e-48ba-a5ee-9ceef5808130-config\") pod \"machine-api-operator-5694c8668f-sj7gj\" (UID: \"6d720972-b11e-48ba-a5ee-9ceef5808130\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.665693 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-trusted-ca-bundle\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.665956 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6d720972-b11e-48ba-a5ee-9ceef5808130-images\") pod \"machine-api-operator-5694c8668f-sj7gj\" (UID: \"6d720972-b11e-48ba-a5ee-9ceef5808130\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.667351 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.671669 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6d720972-b11e-48ba-a5ee-9ceef5808130-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-sj7gj\" (UID: \"6d720972-b11e-48ba-a5ee-9ceef5808130\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 
07:25:40.672093 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-config\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.672480 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.673280 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5471807-9088-4007-b0dc-b68760e76415-config\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.674108 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.683384 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-s5qql"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.676984 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.700639 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.701179 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e5471807-9088-4007-b0dc-b68760e76415-etcd-client\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.701810 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-config\") pod \"route-controller-manager-6576b87f9c-5rj7j\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.702059 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e5471807-9088-4007-b0dc-b68760e76415-serving-cert\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.702325 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e5471807-9088-4007-b0dc-b68760e76415-encryption-config\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.703918 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.709772 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-serving-cert\") pod \"route-controller-manager-6576b87f9c-5rj7j\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.721724 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.730552 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.730864 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.736060 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-tprml"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.737204 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.742010 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.743130 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.743790 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.745221 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.745589 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.746625 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.746962 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752646 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752701 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wx5nn\" (UniqueName: \"kubernetes.io/projected/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-kube-api-access-wx5nn\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752737 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752756 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-service-ca-bundle\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752781 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f-bound-sa-token\") pod \"ingress-operator-5b745b69d9-rkvl7\" (UID: \"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752800 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-console-config\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752819 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752838 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2298718f-d9f4-4714-acbb-01739d0c7b62-console-serving-cert\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752858 
4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndtd6\" (UniqueName: \"kubernetes.io/projected/2298718f-d9f4-4714-acbb-01739d0c7b62-kube-api-access-ndtd6\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752879 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-oauth-serving-cert\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752907 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-service-ca\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752929 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hg4v7\" (UniqueName: \"kubernetes.io/projected/42bb4278-3f91-4824-ba06-22af4099f7e4-kube-api-access-hg4v7\") pod \"migrator-59844c95c7-jxwb5\" (UID: \"42bb4278-3f91-4824-ba06-22af4099f7e4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jxwb5" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752949 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/776aae02-31ea-4a89-afa0-11a2bd798df2-auth-proxy-config\") pod \"machine-approver-56656f9798-r8xbn\" (UID: \"776aae02-31ea-4a89-afa0-11a2bd798df2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752969 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72rlx\" (UniqueName: \"kubernetes.io/projected/acd8745a-8f0b-4589-be83-b3673735409e-kube-api-access-72rlx\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2rsm\" (UID: \"acd8745a-8f0b-4589-be83-b3673735409e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.752991 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/776aae02-31ea-4a89-afa0-11a2bd798df2-config\") pod \"machine-approver-56656f9798-r8xbn\" (UID: \"776aae02-31ea-4a89-afa0-11a2bd798df2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753033 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f-metrics-tls\") pod \"ingress-operator-5b745b69d9-rkvl7\" (UID: \"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753051 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-cliconfig\") 
pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753067 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/776aae02-31ea-4a89-afa0-11a2bd798df2-machine-approver-tls\") pod \"machine-approver-56656f9798-r8xbn\" (UID: \"776aae02-31ea-4a89-afa0-11a2bd798df2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753085 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753109 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f-trusted-ca\") pod \"ingress-operator-5b745b69d9-rkvl7\" (UID: \"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753132 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdv5d\" (UniqueName: \"kubernetes.io/projected/138bf70b-dbf6-41a8-8595-6451279e8080-kube-api-access-zdv5d\") pod \"openshift-apiserver-operator-796bbdcf4f-q4psv\" (UID: \"138bf70b-dbf6-41a8-8595-6451279e8080\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753154 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/138bf70b-dbf6-41a8-8595-6451279e8080-config\") pod \"openshift-apiserver-operator-796bbdcf4f-q4psv\" (UID: \"138bf70b-dbf6-41a8-8595-6451279e8080\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753171 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753190 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/416589cc-479e-45e0-8fad-2ccd30115769-audit-dir\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753205 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-etcd-client\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 
01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753220 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753240 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9v84t\" (UniqueName: \"kubernetes.io/projected/416589cc-479e-45e0-8fad-2ccd30115769-kube-api-access-9v84t\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753261 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acd8745a-8f0b-4589-be83-b3673735409e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2rsm\" (UID: \"acd8745a-8f0b-4589-be83-b3673735409e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753278 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jjc7\" (UniqueName: \"kubernetes.io/projected/77633ea9-7071-47d2-a623-708e4cd5b99f-kube-api-access-7jjc7\") pod \"dns-operator-744455d44c-nnskb\" (UID: \"77633ea9-7071-47d2-a623-708e4cd5b99f\") " pod="openshift-dns-operator/dns-operator-744455d44c-nnskb" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753296 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753314 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/138bf70b-dbf6-41a8-8595-6451279e8080-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-q4psv\" (UID: \"138bf70b-dbf6-41a8-8595-6451279e8080\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753328 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753346 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/acd8745a-8f0b-4589-be83-b3673735409e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2rsm\" (UID: \"acd8745a-8f0b-4589-be83-b3673735409e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" Feb 01 07:25:40 crc kubenswrapper[4650]: 
I0201 07:25:40.753362 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2298718f-d9f4-4714-acbb-01739d0c7b62-console-oauth-config\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753380 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-audit-policies\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753397 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkphg\" (UniqueName: \"kubernetes.io/projected/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-kube-api-access-hkphg\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753413 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/77633ea9-7071-47d2-a623-708e4cd5b99f-metrics-tls\") pod \"dns-operator-744455d44c-nnskb\" (UID: \"77633ea9-7071-47d2-a623-708e4cd5b99f\") " pod="openshift-dns-operator/dns-operator-744455d44c-nnskb" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753434 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753462 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-config\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753476 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-serving-cert\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753495 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vchvp\" (UniqueName: \"kubernetes.io/projected/a09c591f-ba12-43d6-98bf-003df4aa5813-kube-api-access-vchvp\") pod \"openshift-config-operator-7777fb866f-xql62\" (UID: \"a09c591f-ba12-43d6-98bf-003df4aa5813\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753514 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdn56\" (UniqueName: 
\"kubernetes.io/projected/776aae02-31ea-4a89-afa0-11a2bd798df2-kube-api-access-rdn56\") pod \"machine-approver-56656f9798-r8xbn\" (UID: \"776aae02-31ea-4a89-afa0-11a2bd798df2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753532 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-trusted-ca-bundle\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753548 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753565 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-serving-cert\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753583 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a09c591f-ba12-43d6-98bf-003df4aa5813-serving-cert\") pod \"openshift-config-operator-7777fb866f-xql62\" (UID: \"a09c591f-ba12-43d6-98bf-003df4aa5813\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753605 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-encryption-config\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753621 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753637 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9kpv\" (UniqueName: \"kubernetes.io/projected/6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f-kube-api-access-d9kpv\") pod \"ingress-operator-5b745b69d9-rkvl7\" (UID: \"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753654 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jv2d\" (UniqueName: \"kubernetes.io/projected/169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4-kube-api-access-4jv2d\") pod \"downloads-7954f5f757-xfg9f\" (UID: \"169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4\") " 
pod="openshift-console/downloads-7954f5f757-xfg9f" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753673 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753690 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/a09c591f-ba12-43d6-98bf-003df4aa5813-available-featuregates\") pod \"openshift-config-operator-7777fb866f-xql62\" (UID: \"a09c591f-ba12-43d6-98bf-003df4aa5813\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753707 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753723 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-audit-policies\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753738 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-audit-dir\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.753817 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-audit-dir\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.755576 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.757205 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.757664 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.758159 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.767644 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-trusted-ca-bundle\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.759070 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.759821 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acd8745a-8f0b-4589-be83-b3673735409e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2rsm\" (UID: \"acd8745a-8f0b-4589-be83-b3673735409e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.766153 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.758492 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.768318 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/138bf70b-dbf6-41a8-8595-6451279e8080-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-q4psv\" (UID: \"138bf70b-dbf6-41a8-8595-6451279e8080\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.771049 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.771368 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/a09c591f-ba12-43d6-98bf-003df4aa5813-available-featuregates\") pod \"openshift-config-operator-7777fb866f-xql62\" (UID: \"a09c591f-ba12-43d6-98bf-003df4aa5813\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.771947 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.772441 
4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-audit-policies\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.772623 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-service-ca-bundle\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.772698 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.772967 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.773340 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.773596 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-console-config\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.773851 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.774074 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-encryption-config\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.774223 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.775850 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-sl4ph"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.776106 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.776384 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-tfv5c"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.776484 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-sl4ph" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.776715 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.777410 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/138bf70b-dbf6-41a8-8595-6451279e8080-config\") pod \"openshift-apiserver-operator-796bbdcf4f-q4psv\" (UID: \"138bf70b-dbf6-41a8-8595-6451279e8080\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.778642 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-oauth-serving-cert\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.779329 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-service-ca\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.780364 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/776aae02-31ea-4a89-afa0-11a2bd798df2-auth-proxy-config\") pod \"machine-approver-56656f9798-r8xbn\" (UID: \"776aae02-31ea-4a89-afa0-11a2bd798df2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.781137 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/776aae02-31ea-4a89-afa0-11a2bd798df2-config\") pod \"machine-approver-56656f9798-r8xbn\" (UID: \"776aae02-31ea-4a89-afa0-11a2bd798df2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.781723 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.783370 4650 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.783834 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.784621 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f-metrics-tls\") pod \"ingress-operator-5b745b69d9-rkvl7\" (UID: \"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.785304 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2298718f-d9f4-4714-acbb-01739d0c7b62-console-serving-cert\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.785748 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.785883 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/776aae02-31ea-4a89-afa0-11a2bd798df2-machine-approver-tls\") pod \"machine-approver-56656f9798-r8xbn\" (UID: \"776aae02-31ea-4a89-afa0-11a2bd798df2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.788214 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.788225 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-audit-policies\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.788321 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/416589cc-479e-45e0-8fad-2ccd30115769-audit-dir\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.788840 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.789604 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-zkcsv"] Feb 01 07:25:40 crc kubenswrapper[4650]: 
I0201 07:25:40.789697 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f-trusted-ca\") pod \"ingress-operator-5b745b69d9-rkvl7\" (UID: \"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.789957 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.790641 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.790805 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.791487 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-24q9r"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.791728 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.791978 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.792135 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-config\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.792233 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.792363 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-f5d8l"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.792593 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-serving-cert\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.792975 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.793254 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-f5d8l" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.794195 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.796963 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5fhkk"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.797009 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.797035 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-s4bcm"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.797463 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2298718f-d9f4-4714-acbb-01739d0c7b62-console-oauth-config\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.798192 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-etcd-client\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.798450 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/acd8745a-8f0b-4589-be83-b3673735409e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2rsm\" (UID: \"acd8745a-8f0b-4589-be83-b3673735409e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.798582 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a09c591f-ba12-43d6-98bf-003df4aa5813-serving-cert\") pod \"openshift-config-operator-7777fb866f-xql62\" (UID: \"a09c591f-ba12-43d6-98bf-003df4aa5813\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.799180 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-sj7gj"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.799611 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/77633ea9-7071-47d2-a623-708e4cd5b99f-metrics-tls\") pod \"dns-operator-744455d44c-nnskb\" (UID: \"77633ea9-7071-47d2-a623-708e4cd5b99f\") " pod="openshift-dns-operator/dns-operator-744455d44c-nnskb" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.801412 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv"] Feb 01 
07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.801524 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-xql62"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.802288 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.803531 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.803849 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-serving-cert\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.804053 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-r65td"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.805084 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.807146 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.808621 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-nnskb"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.812505 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-jqdv7"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.813442 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-jqdv7" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.813781 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-jxwb5"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.815389 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-s5qql"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.816713 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.817713 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.819332 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-snf8v"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.819949 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.821284 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-xfg9f"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.822604 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.824008 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.825680 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.826676 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.827795 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.838280 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-crkwn"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.838559 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.838647 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.840425 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.841456 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jqdv7"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.844924 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh"] Feb 01 07:25:40 crc 
kubenswrapper[4650]: I0201 07:25:40.846937 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-sl4ph"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.848196 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.849415 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-q9fms"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.850709 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.852225 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.853437 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-tprml"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.855251 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.856346 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.858255 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.862881 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-jr9qf"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.863696 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-jr9qf" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.864084 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nsmmn"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.865269 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-jr9qf"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.865347 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.866333 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nsmmn"] Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.878703 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.898642 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.918336 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.938505 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.958146 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.977978 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Feb 01 07:25:40 crc kubenswrapper[4650]: I0201 07:25:40.997646 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.027131 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.038494 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.079183 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.118959 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.120551 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbcgl\" (UniqueName: \"kubernetes.io/projected/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-kube-api-access-pbcgl\") pod \"route-controller-manager-6576b87f9c-5rj7j\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.156225 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsd4f\" (UniqueName: \"kubernetes.io/projected/e5471807-9088-4007-b0dc-b68760e76415-kube-api-access-nsd4f\") pod \"apiserver-76f77b778f-24q9r\" (UID: \"e5471807-9088-4007-b0dc-b68760e76415\") " pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.166534 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.173494 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jj6ds\" (UniqueName: \"kubernetes.io/projected/6568a223-ba53-4690-9378-08b043d9db27-kube-api-access-jj6ds\") pod \"controller-manager-879f6c89f-5fhkk\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.179430 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.198856 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.217789 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.237665 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.258380 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.278500 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.298822 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.339046 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.339643 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dj9x8\" (UniqueName: \"kubernetes.io/projected/6d720972-b11e-48ba-a5ee-9ceef5808130-kube-api-access-dj9x8\") pod \"machine-api-operator-5694c8668f-sj7gj\" (UID: \"6d720972-b11e-48ba-a5ee-9ceef5808130\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.368760 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.377359 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.398835 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.413320 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.419395 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.436678 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j"] Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.440202 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.441332 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.461920 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.479872 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.480639 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.501506 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.518603 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.549561 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.561409 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.581416 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.602956 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.617893 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.639340 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.658782 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.665948 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-24q9r"] Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.694344 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.697315 4650 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5fhkk"] Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.701376 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.718931 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.737307 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.756732 4650 request.go:700] Waited for 1.007601359s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operator-lifecycle-manager/secrets?fieldSelector=metadata.name%3Dolm-operator-serviceaccount-dockercfg-rq7zk&limit=500&resourceVersion=0 Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.758812 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.778094 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.800611 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.845820 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wx5nn\" (UniqueName: \"kubernetes.io/projected/a2f10408-e875-4afe-89e3-9c63b8f4b2dc-kube-api-access-wx5nn\") pod \"apiserver-7bbb656c7d-b7vsd\" (UID: \"a2f10408-e875-4afe-89e3-9c63b8f4b2dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.871287 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vchvp\" (UniqueName: \"kubernetes.io/projected/a09c591f-ba12-43d6-98bf-003df4aa5813-kube-api-access-vchvp\") pod \"openshift-config-operator-7777fb866f-xql62\" (UID: \"a09c591f-ba12-43d6-98bf-003df4aa5813\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.880104 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jjc7\" (UniqueName: \"kubernetes.io/projected/77633ea9-7071-47d2-a623-708e4cd5b99f-kube-api-access-7jjc7\") pod \"dns-operator-744455d44c-nnskb\" (UID: \"77633ea9-7071-47d2-a623-708e4cd5b99f\") " pod="openshift-dns-operator/dns-operator-744455d44c-nnskb" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.894406 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdn56\" (UniqueName: \"kubernetes.io/projected/776aae02-31ea-4a89-afa0-11a2bd798df2-kube-api-access-rdn56\") pod \"machine-approver-56656f9798-r8xbn\" (UID: \"776aae02-31ea-4a89-afa0-11a2bd798df2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.899357 4650 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.906747 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" event={"ID":"6568a223-ba53-4690-9378-08b043d9db27","Type":"ContainerStarted","Data":"5001a7699ea470bee3f1ea7bb5af69b7360643ba6f3e8e60c1fbb54beaa06ad1"} Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.909151 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-24q9r" event={"ID":"e5471807-9088-4007-b0dc-b68760e76415","Type":"ContainerStarted","Data":"b9c8e9fd9f1b0f5acf7bc0d6565753ce9e9af360c4964dd5276bc3f006fe7971"} Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.913255 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" event={"ID":"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd","Type":"ContainerStarted","Data":"744132a0de141f8ea0241edc43475b7614df3bca2e0d69c7c4a88b7c44a85e70"} Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.913312 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" event={"ID":"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd","Type":"ContainerStarted","Data":"6fbd678fd6ea677af7d2e774dfa29bcfe6d2c9fe4a233dfb10821cf3064f60c5"} Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.914328 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.916677 4650 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-5rj7j container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.916759 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" podUID="f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.920323 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.938524 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.959274 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.963363 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.978897 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-sj7gj"] Feb 01 07:25:41 crc kubenswrapper[4650]: I0201 07:25:41.980386 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Feb 01 07:25:41 crc kubenswrapper[4650]: W0201 07:25:41.994458 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d720972_b11e_48ba_a5ee_9ceef5808130.slice/crio-94ce2e7b7c14931b6242b141c557717c281b0abd305a4549ed2d37e5b2ccad41 WatchSource:0}: Error finding container 94ce2e7b7c14931b6242b141c557717c281b0abd305a4549ed2d37e5b2ccad41: Status 404 returned error can't find the container with id 94ce2e7b7c14931b6242b141c557717c281b0abd305a4549ed2d37e5b2ccad41 Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.021959 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9kpv\" (UniqueName: \"kubernetes.io/projected/6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f-kube-api-access-d9kpv\") pod \"ingress-operator-5b745b69d9-rkvl7\" (UID: \"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.028796 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.031567 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jv2d\" (UniqueName: \"kubernetes.io/projected/169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4-kube-api-access-4jv2d\") pod \"downloads-7954f5f757-xfg9f\" (UID: \"169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4\") " pod="openshift-console/downloads-7954f5f757-xfg9f" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.059524 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.064226 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f-bound-sa-token\") pod \"ingress-operator-5b745b69d9-rkvl7\" (UID: \"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.069146 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-nnskb" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.074129 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-xfg9f" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.078157 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.079390 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.099756 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.122548 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.142435 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.160926 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.178693 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.178850 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.183628 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-xql62"] Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.200851 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.253560 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndtd6\" (UniqueName: \"kubernetes.io/projected/2298718f-d9f4-4714-acbb-01739d0c7b62-kube-api-access-ndtd6\") pod \"console-f9d7485db-snf8v\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.258163 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hg4v7\" (UniqueName: \"kubernetes.io/projected/42bb4278-3f91-4824-ba06-22af4099f7e4-kube-api-access-hg4v7\") pod \"migrator-59844c95c7-jxwb5\" (UID: \"42bb4278-3f91-4824-ba06-22af4099f7e4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jxwb5" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.273814 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd"] Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.284352 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72rlx\" (UniqueName: \"kubernetes.io/projected/acd8745a-8f0b-4589-be83-b3673735409e-kube-api-access-72rlx\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2rsm\" (UID: \"acd8745a-8f0b-4589-be83-b3673735409e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" Feb 01 
07:25:42 crc kubenswrapper[4650]: W0201 07:25:42.291244 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod776aae02_31ea_4a89_afa0_11a2bd798df2.slice/crio-f6967a787831d2ac9fd630620834667452ad98bfd014a3773d6ca8c06c962309 WatchSource:0}: Error finding container f6967a787831d2ac9fd630620834667452ad98bfd014a3773d6ca8c06c962309: Status 404 returned error can't find the container with id f6967a787831d2ac9fd630620834667452ad98bfd014a3773d6ca8c06c962309 Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.315564 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdv5d\" (UniqueName: \"kubernetes.io/projected/138bf70b-dbf6-41a8-8595-6451279e8080-kube-api-access-zdv5d\") pod \"openshift-apiserver-operator-796bbdcf4f-q4psv\" (UID: \"138bf70b-dbf6-41a8-8595-6451279e8080\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.316005 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkphg\" (UniqueName: \"kubernetes.io/projected/cdb1c5f5-c67b-45d5-af23-2168beaf2cae-kube-api-access-hkphg\") pod \"authentication-operator-69f744f599-tfv5c\" (UID: \"cdb1c5f5-c67b-45d5-af23-2168beaf2cae\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.320091 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.335042 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-xfg9f"] Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.338534 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9v84t\" (UniqueName: \"kubernetes.io/projected/416589cc-479e-45e0-8fad-2ccd30115769-kube-api-access-9v84t\") pod \"oauth-openshift-558db77b4-r65td\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.339214 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.357666 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.358050 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.379843 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.399456 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.411463 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.413822 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jxwb5" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.417865 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.439941 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.458707 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.478009 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.499144 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.519958 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.520299 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.539292 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.541633 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.559192 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.583557 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.588319 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-snf8v"] Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.599855 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.620798 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.638239 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.652344 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-nnskb"] Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.658520 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.664602 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7"] Feb 01 07:25:42 crc 
kubenswrapper[4650]: I0201 07:25:42.705259 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-registry-tls\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.705356 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a0e132bd-4673-48b5-9362-32781a1f9405-trusted-ca\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.705422 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbctd\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-kube-api-access-gbctd\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.705460 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a0e132bd-4673-48b5-9362-32781a1f9405-registry-certificates\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.705484 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a0e132bd-4673-48b5-9362-32781a1f9405-installation-pull-secrets\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.705556 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.705639 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-bound-sa-token\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.705668 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a0e132bd-4673-48b5-9362-32781a1f9405-ca-trust-extracted\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: E0201 07:25:42.706431 4650 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:43.206413369 +0000 UTC m=+141.929511614 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.711491 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-r65td"] Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.721421 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.726592 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.743648 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.757534 4650 request.go:700] Waited for 1.893549514s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-canary/configmaps?fieldSelector=metadata.name%3Dopenshift-service-ca.crt&limit=500&resourceVersion=0 Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.759716 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.783829 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.803093 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.821470 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.825098 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.825547 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/badf229a-c84d-41c1-b283-e473e15ed647-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-85g2d\" (UID: \"badf229a-c84d-41c1-b283-e473e15ed647\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.826318 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-volume\" (UniqueName: \"kubernetes.io/configmap/499b8a50-5954-4979-9473-63f0bae378f0-config-volume\") pod \"dns-default-jqdv7\" (UID: \"499b8a50-5954-4979-9473-63f0bae378f0\") " pod="openshift-dns/dns-default-jqdv7" Feb 01 07:25:42 crc kubenswrapper[4650]: E0201 07:25:42.826534 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:43.326509219 +0000 UTC m=+142.049607454 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.827393 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-registry-tls\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.827906 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-etcd-client\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.827993 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-config\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.828016 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/50135dde-6b69-4044-b285-0b33b617e7a9-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-76xnb\" (UID: \"50135dde-6b69-4044-b285-0b33b617e7a9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.828095 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a0e132bd-4673-48b5-9362-32781a1f9405-trusted-ca\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.828114 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7lmz\" (UniqueName: \"kubernetes.io/projected/50135dde-6b69-4044-b285-0b33b617e7a9-kube-api-access-q7lmz\") pod \"cluster-image-registry-operator-dc59b4c8b-76xnb\" (UID: 
\"50135dde-6b69-4044-b285-0b33b617e7a9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.828162 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/badf229a-c84d-41c1-b283-e473e15ed647-config\") pod \"kube-controller-manager-operator-78b949d7b-85g2d\" (UID: \"badf229a-c84d-41c1-b283-e473e15ed647\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.832316 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a0e132bd-4673-48b5-9362-32781a1f9405-trusted-ca\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.832349 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-tfv5c"] Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.839917 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843127 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/badf229a-c84d-41c1-b283-e473e15ed647-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-85g2d\" (UID: \"badf229a-c84d-41c1-b283-e473e15ed647\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843173 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/50135dde-6b69-4044-b285-0b33b617e7a9-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-76xnb\" (UID: \"50135dde-6b69-4044-b285-0b33b617e7a9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843240 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbctd\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-kube-api-access-gbctd\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843291 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a0e132bd-4673-48b5-9362-32781a1f9405-registry-certificates\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843315 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a0e132bd-4673-48b5-9362-32781a1f9405-installation-pull-secrets\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843402 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843428 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2ca8c50-d0b0-4ad6-beda-ca1722a143bf-config\") pod \"console-operator-58897d9998-s4bcm\" (UID: \"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf\") " pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843449 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d2ca8c50-d0b0-4ad6-beda-ca1722a143bf-trusted-ca\") pod \"console-operator-58897d9998-s4bcm\" (UID: \"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf\") " pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843515 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrsdq\" (UniqueName: \"kubernetes.io/projected/d2ca8c50-d0b0-4ad6-beda-ca1722a143bf-kube-api-access-hrsdq\") pod \"console-operator-58897d9998-s4bcm\" (UID: \"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf\") " pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843542 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9jrg\" (UniqueName: \"kubernetes.io/projected/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-kube-api-access-c9jrg\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843636 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-bound-sa-token\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843730 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a0e132bd-4673-48b5-9362-32781a1f9405-ca-trust-extracted\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843749 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-etcd-ca\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843765 4650 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/499b8a50-5954-4979-9473-63f0bae378f0-metrics-tls\") pod \"dns-default-jqdv7\" (UID: \"499b8a50-5954-4979-9473-63f0bae378f0\") " pod="openshift-dns/dns-default-jqdv7" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843819 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-etcd-service-ca\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843857 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2ca8c50-d0b0-4ad6-beda-ca1722a143bf-serving-cert\") pod \"console-operator-58897d9998-s4bcm\" (UID: \"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf\") " pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843908 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-serving-cert\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.843996 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/50135dde-6b69-4044-b285-0b33b617e7a9-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-76xnb\" (UID: \"50135dde-6b69-4044-b285-0b33b617e7a9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.844067 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsx2z\" (UniqueName: \"kubernetes.io/projected/499b8a50-5954-4979-9473-63f0bae378f0-kube-api-access-tsx2z\") pod \"dns-default-jqdv7\" (UID: \"499b8a50-5954-4979-9473-63f0bae378f0\") " pod="openshift-dns/dns-default-jqdv7" Feb 01 07:25:42 crc kubenswrapper[4650]: E0201 07:25:42.845386 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:43.345369445 +0000 UTC m=+142.068467690 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.846892 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a0e132bd-4673-48b5-9362-32781a1f9405-ca-trust-extracted\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.848216 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-registry-tls\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.849791 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a0e132bd-4673-48b5-9362-32781a1f9405-registry-certificates\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.852912 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a0e132bd-4673-48b5-9362-32781a1f9405-installation-pull-secrets\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.860336 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.878423 4650 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.914981 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-jxwb5"] Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948277 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948571 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsx2z\" (UniqueName: \"kubernetes.io/projected/499b8a50-5954-4979-9473-63f0bae378f0-kube-api-access-tsx2z\") pod \"dns-default-jqdv7\" (UID: \"499b8a50-5954-4979-9473-63f0bae378f0\") " pod="openshift-dns/dns-default-jqdv7" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948594 4650 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/badf229a-c84d-41c1-b283-e473e15ed647-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-85g2d\" (UID: \"badf229a-c84d-41c1-b283-e473e15ed647\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948615 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/11275769-14c7-40e0-a0d4-378617ac97d3-proxy-tls\") pod \"machine-config-controller-84d6567774-8zlcr\" (UID: \"11275769-14c7-40e0-a0d4-378617ac97d3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948645 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/499b8a50-5954-4979-9473-63f0bae378f0-config-volume\") pod \"dns-default-jqdv7\" (UID: \"499b8a50-5954-4979-9473-63f0bae378f0\") " pod="openshift-dns/dns-default-jqdv7" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948663 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g47bf\" (UniqueName: \"kubernetes.io/projected/bee30637-b353-4aab-a8b6-9e26aa6862c4-kube-api-access-g47bf\") pod \"service-ca-operator-777779d784-qh4nl\" (UID: \"bee30637-b353-4aab-a8b6-9e26aa6862c4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948699 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a3c2c4c5-34a2-46e8-a551-d6e171a10dd0-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-zcd4l\" (UID: \"a3c2c4c5-34a2-46e8-a551-d6e171a10dd0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948716 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86mds\" (UniqueName: \"kubernetes.io/projected/858b8a5b-de8c-4e6d-bdd3-834e1249e731-kube-api-access-86mds\") pod \"catalog-operator-68c6474976-28b2s\" (UID: \"858b8a5b-de8c-4e6d-bdd3-834e1249e731\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948736 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7lmz\" (UniqueName: \"kubernetes.io/projected/50135dde-6b69-4044-b285-0b33b617e7a9-kube-api-access-q7lmz\") pod \"cluster-image-registry-operator-dc59b4c8b-76xnb\" (UID: \"50135dde-6b69-4044-b285-0b33b617e7a9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948758 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-plugins-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948775 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b0d96618-e665-41d9-921f-83167ba4f6f6-proxy-tls\") pod \"machine-config-operator-74547568cd-gpqvq\" (UID: \"b0d96618-e665-41d9-921f-83167ba4f6f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948791 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/50135dde-6b69-4044-b285-0b33b617e7a9-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-76xnb\" (UID: \"50135dde-6b69-4044-b285-0b33b617e7a9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948808 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/0d41d4aa-0c8b-4d30-85fb-870259c021ae-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-b7kgx\" (UID: \"0d41d4aa-0c8b-4d30-85fb-870259c021ae\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948828 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpbm5\" (UniqueName: \"kubernetes.io/projected/e3b2ad4a-8a06-467b-a83a-f203dd935f9f-kube-api-access-lpbm5\") pod \"control-plane-machine-set-operator-78cbb6b69f-c9v8k\" (UID: \"e3b2ad4a-8a06-467b-a83a-f203dd935f9f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948845 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvsq6\" (UniqueName: \"kubernetes.io/projected/078b45eb-853f-4560-888a-8ba2928a847b-kube-api-access-cvsq6\") pod \"ingress-canary-jr9qf\" (UID: \"078b45eb-853f-4560-888a-8ba2928a847b\") " pod="openshift-ingress-canary/ingress-canary-jr9qf" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948858 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-mountpoint-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948875 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0d352c71-f363-4f44-abba-d535c50f6497-metrics-certs\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948893 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpzfl\" (UniqueName: \"kubernetes.io/projected/d4fec23f-d6a0-4975-b969-bc01b7dab696-kube-api-access-kpzfl\") pod \"olm-operator-6b444d44fb-hfv5r\" (UID: \"d4fec23f-d6a0-4975-b969-bc01b7dab696\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948909 
4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/0a636f07-7f37-4531-a48c-4851172534e9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-z9whs\" (UID: \"0a636f07-7f37-4531-a48c-4851172534e9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948929 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-socket-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948949 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/858b8a5b-de8c-4e6d-bdd3-834e1249e731-profile-collector-cert\") pod \"catalog-operator-68c6474976-28b2s\" (UID: \"858b8a5b-de8c-4e6d-bdd3-834e1249e731\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948967 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2ca8c50-d0b0-4ad6-beda-ca1722a143bf-config\") pod \"console-operator-58897d9998-s4bcm\" (UID: \"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf\") " pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.948983 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b990a3f9-509d-405d-8ae1-cdcb4f752f93-certs\") pod \"machine-config-server-f5d8l\" (UID: \"b990a3f9-509d-405d-8ae1-cdcb4f752f93\") " pod="openshift-machine-config-operator/machine-config-server-f5d8l" Feb 01 07:25:42 crc kubenswrapper[4650]: E0201 07:25:42.949069 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:43.449009728 +0000 UTC m=+142.172107973 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.951174 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-bound-sa-token\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.951565 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" event={"ID":"a2f10408-e875-4afe-89e3-9c63b8f4b2dc","Type":"ContainerStarted","Data":"49699d12a1c9a3e31d1095cdb3c302f65108e2feea13c25f61b0a3f87d36cf5b"} Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.961702 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-snf8v" event={"ID":"2298718f-d9f4-4714-acbb-01739d0c7b62","Type":"ContainerStarted","Data":"8cf3c8759ed91762a8c0ae98690b5c75f66d56cf440cb7b927ac226006c6c09f"} Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.963684 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/50135dde-6b69-4044-b285-0b33b617e7a9-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-76xnb\" (UID: \"50135dde-6b69-4044-b285-0b33b617e7a9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.964114 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/499b8a50-5954-4979-9473-63f0bae378f0-config-volume\") pod \"dns-default-jqdv7\" (UID: \"499b8a50-5954-4979-9473-63f0bae378f0\") " pod="openshift-dns/dns-default-jqdv7" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.964468 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/badf229a-c84d-41c1-b283-e473e15ed647-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-85g2d\" (UID: \"badf229a-c84d-41c1-b283-e473e15ed647\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.965889 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbctd\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-kube-api-access-gbctd\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.967800 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2ca8c50-d0b0-4ad6-beda-ca1722a143bf-config\") pod \"console-operator-58897d9998-s4bcm\" (UID: \"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf\") " 
pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.969536 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d2ca8c50-d0b0-4ad6-beda-ca1722a143bf-trusted-ca\") pod \"console-operator-58897d9998-s4bcm\" (UID: \"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf\") " pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.969593 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27e56eb9-20b6-40e2-bae7-64379aebe1ad-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fthm2\" (UID: \"27e56eb9-20b6-40e2-bae7-64379aebe1ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.969898 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9jrg\" (UniqueName: \"kubernetes.io/projected/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-kube-api-access-c9jrg\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.969945 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/858b8a5b-de8c-4e6d-bdd3-834e1249e731-srv-cert\") pod \"catalog-operator-68c6474976-28b2s\" (UID: \"858b8a5b-de8c-4e6d-bdd3-834e1249e731\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.969976 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8262\" (UniqueName: \"kubernetes.io/projected/b0d96618-e665-41d9-921f-83167ba4f6f6-kube-api-access-b8262\") pod \"machine-config-operator-74547568cd-gpqvq\" (UID: \"b0d96618-e665-41d9-921f-83167ba4f6f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.969999 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/01f92651-6a98-481d-8dc6-041103fc10d4-signing-key\") pod \"service-ca-9c57cc56f-s5qql\" (UID: \"01f92651-6a98-481d-8dc6-041103fc10d4\") " pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970385 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hm7cw\" (UniqueName: \"kubernetes.io/projected/0a636f07-7f37-4531-a48c-4851172534e9-kube-api-access-hm7cw\") pod \"cluster-samples-operator-665b6dd947-z9whs\" (UID: \"0a636f07-7f37-4531-a48c-4851172534e9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970411 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6blmp\" (UniqueName: \"kubernetes.io/projected/b990a3f9-509d-405d-8ae1-cdcb4f752f93-kube-api-access-6blmp\") pod \"machine-config-server-f5d8l\" (UID: \"b990a3f9-509d-405d-8ae1-cdcb4f752f93\") " 
pod="openshift-machine-config-operator/machine-config-server-f5d8l" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970440 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5v7c\" (UniqueName: \"kubernetes.io/projected/adc774cf-13b6-49b9-a5a7-a3816d6042a9-kube-api-access-q5v7c\") pod \"kube-storage-version-migrator-operator-b67b599dd-g8mcc\" (UID: \"adc774cf-13b6-49b9-a5a7-a3816d6042a9\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970470 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/784dfbaa-4863-45d9-ac03-05d772fcb779-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-tprml\" (UID: \"784dfbaa-4863-45d9-ac03-05d772fcb779\") " pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970495 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b0d96618-e665-41d9-921f-83167ba4f6f6-images\") pod \"machine-config-operator-74547568cd-gpqvq\" (UID: \"b0d96618-e665-41d9-921f-83167ba4f6f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970515 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-csi-data-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970547 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/784dfbaa-4863-45d9-ac03-05d772fcb779-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-tprml\" (UID: \"784dfbaa-4863-45d9-ac03-05d772fcb779\") " pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970577 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-etcd-service-ca\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970610 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/671ece21-a48b-4e84-bc67-7e34bbe90a6b-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-sl4ph\" (UID: \"671ece21-a48b-4e84-bc67-7e34bbe90a6b\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sl4ph" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970632 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-registration-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " 
pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970659 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2ca8c50-d0b0-4ad6-beda-ca1722a143bf-serving-cert\") pod \"console-operator-58897d9998-s4bcm\" (UID: \"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf\") " pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970720 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/0d352c71-f363-4f44-abba-d535c50f6497-default-certificate\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970756 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3c2c4c5-34a2-46e8-a551-d6e171a10dd0-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-zcd4l\" (UID: \"a3c2c4c5-34a2-46e8-a551-d6e171a10dd0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970784 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adc774cf-13b6-49b9-a5a7-a3816d6042a9-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-g8mcc\" (UID: \"adc774cf-13b6-49b9-a5a7-a3816d6042a9\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970812 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/01f92651-6a98-481d-8dc6-041103fc10d4-signing-cabundle\") pod \"service-ca-9c57cc56f-s5qql\" (UID: \"01f92651-6a98-481d-8dc6-041103fc10d4\") " pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970884 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfqdk\" (UniqueName: \"kubernetes.io/projected/57fbc47e-a1c0-44f5-abd5-40a696f37a37-kube-api-access-cfqdk\") pod \"packageserver-d55dfcdfc-5tcfm\" (UID: \"57fbc47e-a1c0-44f5-abd5-40a696f37a37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970925 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3c2c4c5-34a2-46e8-a551-d6e171a10dd0-config\") pod \"kube-apiserver-operator-766d6c64bb-zcd4l\" (UID: \"a3c2c4c5-34a2-46e8-a551-d6e171a10dd0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970950 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bee30637-b353-4aab-a8b6-9e26aa6862c4-serving-cert\") pod \"service-ca-operator-777779d784-qh4nl\" (UID: \"bee30637-b353-4aab-a8b6-9e26aa6862c4\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970972 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27e56eb9-20b6-40e2-bae7-64379aebe1ad-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fthm2\" (UID: \"27e56eb9-20b6-40e2-bae7-64379aebe1ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.970994 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-etcd-client\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.971084 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/57fbc47e-a1c0-44f5-abd5-40a696f37a37-tmpfs\") pod \"packageserver-d55dfcdfc-5tcfm\" (UID: \"57fbc47e-a1c0-44f5-abd5-40a696f37a37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.971110 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2a139c37-a580-476f-a35b-e5daba038dbc-secret-volume\") pod \"collect-profiles-29498835-4crxh\" (UID: \"2a139c37-a580-476f-a35b-e5daba038dbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.971159 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-config\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.971188 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/50135dde-6b69-4044-b285-0b33b617e7a9-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-76xnb\" (UID: \"50135dde-6b69-4044-b285-0b33b617e7a9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.971308 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bee30637-b353-4aab-a8b6-9e26aa6862c4-config\") pod \"service-ca-operator-777779d784-qh4nl\" (UID: \"bee30637-b353-4aab-a8b6-9e26aa6862c4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.971335 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cl5qp\" (UniqueName: \"kubernetes.io/projected/01f92651-6a98-481d-8dc6-041103fc10d4-kube-api-access-cl5qp\") pod \"service-ca-9c57cc56f-s5qql\" (UID: \"01f92651-6a98-481d-8dc6-041103fc10d4\") " pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.971355 4650 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67v7g\" (UniqueName: \"kubernetes.io/projected/2a139c37-a580-476f-a35b-e5daba038dbc-kube-api-access-67v7g\") pod \"collect-profiles-29498835-4crxh\" (UID: \"2a139c37-a580-476f-a35b-e5daba038dbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.971380 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/badf229a-c84d-41c1-b283-e473e15ed647-config\") pod \"kube-controller-manager-operator-78b949d7b-85g2d\" (UID: \"badf229a-c84d-41c1-b283-e473e15ed647\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.971400 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d4fec23f-d6a0-4975-b969-bc01b7dab696-srv-cert\") pod \"olm-operator-6b444d44fb-hfv5r\" (UID: \"d4fec23f-d6a0-4975-b969-bc01b7dab696\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.971421 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/badf229a-c84d-41c1-b283-e473e15ed647-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-85g2d\" (UID: \"badf229a-c84d-41c1-b283-e473e15ed647\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.971444 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dncf5\" (UniqueName: \"kubernetes.io/projected/0d41d4aa-0c8b-4d30-85fb-870259c021ae-kube-api-access-dncf5\") pod \"package-server-manager-789f6589d5-b7kgx\" (UID: \"0d41d4aa-0c8b-4d30-85fb-870259c021ae\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.971468 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0d352c71-f363-4f44-abba-d535c50f6497-service-ca-bundle\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.972256 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/11275769-14c7-40e0-a0d4-378617ac97d3-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-8zlcr\" (UID: \"11275769-14c7-40e0-a0d4-378617ac97d3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.972288 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9bfd\" (UniqueName: \"kubernetes.io/projected/11275769-14c7-40e0-a0d4-378617ac97d3-kube-api-access-t9bfd\") pod \"machine-config-controller-84d6567774-8zlcr\" (UID: \"11275769-14c7-40e0-a0d4-378617ac97d3\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.972879 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-etcd-service-ca\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.972886 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d2ca8c50-d0b0-4ad6-beda-ca1722a143bf-trusted-ca\") pod \"console-operator-58897d9998-s4bcm\" (UID: \"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf\") " pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.973426 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-config\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.974331 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.974436 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b0d96618-e665-41d9-921f-83167ba4f6f6-auth-proxy-config\") pod \"machine-config-operator-74547568cd-gpqvq\" (UID: \"b0d96618-e665-41d9-921f-83167ba4f6f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.974486 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvbpm\" (UniqueName: \"kubernetes.io/projected/671ece21-a48b-4e84-bc67-7e34bbe90a6b-kube-api-access-xvbpm\") pod \"multus-admission-controller-857f4d67dd-sl4ph\" (UID: \"671ece21-a48b-4e84-bc67-7e34bbe90a6b\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sl4ph" Feb 01 07:25:42 crc kubenswrapper[4650]: E0201 07:25:42.974921 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:43.474903013 +0000 UTC m=+142.198001258 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.974955 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrsdq\" (UniqueName: \"kubernetes.io/projected/d2ca8c50-d0b0-4ad6-beda-ca1722a143bf-kube-api-access-hrsdq\") pod \"console-operator-58897d9998-s4bcm\" (UID: \"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf\") " pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.976045 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/078b45eb-853f-4560-888a-8ba2928a847b-cert\") pod \"ingress-canary-jr9qf\" (UID: \"078b45eb-853f-4560-888a-8ba2928a847b\") " pod="openshift-ingress-canary/ingress-canary-jr9qf" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.976398 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d4fec23f-d6a0-4975-b969-bc01b7dab696-profile-collector-cert\") pod \"olm-operator-6b444d44fb-hfv5r\" (UID: \"d4fec23f-d6a0-4975-b969-bc01b7dab696\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.976689 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/0d352c71-f363-4f44-abba-d535c50f6497-stats-auth\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.976835 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9lrf\" (UniqueName: \"kubernetes.io/projected/9241d1f3-454b-4448-883d-221a5274e596-kube-api-access-d9lrf\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.976985 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e3b2ad4a-8a06-467b-a83a-f203dd935f9f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-c9v8k\" (UID: \"e3b2ad4a-8a06-467b-a83a-f203dd935f9f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.977160 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-etcd-ca\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.978018 4650 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-etcd-ca\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.980619 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/499b8a50-5954-4979-9473-63f0bae378f0-metrics-tls\") pod \"dns-default-jqdv7\" (UID: \"499b8a50-5954-4979-9473-63f0bae378f0\") " pod="openshift-dns/dns-default-jqdv7" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.980776 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adc774cf-13b6-49b9-a5a7-a3816d6042a9-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-g8mcc\" (UID: \"adc774cf-13b6-49b9-a5a7-a3816d6042a9\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.981361 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b990a3f9-509d-405d-8ae1-cdcb4f752f93-node-bootstrap-token\") pod \"machine-config-server-f5d8l\" (UID: \"b990a3f9-509d-405d-8ae1-cdcb4f752f93\") " pod="openshift-machine-config-operator/machine-config-server-f5d8l" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.982313 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-serving-cert\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.982452 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/27e56eb9-20b6-40e2-bae7-64379aebe1ad-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fthm2\" (UID: \"27e56eb9-20b6-40e2-bae7-64379aebe1ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.982488 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8nw8\" (UniqueName: \"kubernetes.io/projected/784dfbaa-4863-45d9-ac03-05d772fcb779-kube-api-access-v8nw8\") pod \"marketplace-operator-79b997595-tprml\" (UID: \"784dfbaa-4863-45d9-ac03-05d772fcb779\") " pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.982522 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jct7\" (UniqueName: \"kubernetes.io/projected/0d352c71-f363-4f44-abba-d535c50f6497-kube-api-access-9jct7\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.984493 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/57fbc47e-a1c0-44f5-abd5-40a696f37a37-webhook-cert\") pod \"packageserver-d55dfcdfc-5tcfm\" (UID: \"57fbc47e-a1c0-44f5-abd5-40a696f37a37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.984721 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2a139c37-a580-476f-a35b-e5daba038dbc-config-volume\") pod \"collect-profiles-29498835-4crxh\" (UID: \"2a139c37-a580-476f-a35b-e5daba038dbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.984790 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/50135dde-6b69-4044-b285-0b33b617e7a9-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-76xnb\" (UID: \"50135dde-6b69-4044-b285-0b33b617e7a9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.984849 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/57fbc47e-a1c0-44f5-abd5-40a696f37a37-apiservice-cert\") pod \"packageserver-d55dfcdfc-5tcfm\" (UID: \"57fbc47e-a1c0-44f5-abd5-40a696f37a37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.986584 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/50135dde-6b69-4044-b285-0b33b617e7a9-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-76xnb\" (UID: \"50135dde-6b69-4044-b285-0b33b617e7a9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.987901 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/badf229a-c84d-41c1-b283-e473e15ed647-config\") pod \"kube-controller-manager-operator-78b949d7b-85g2d\" (UID: \"badf229a-c84d-41c1-b283-e473e15ed647\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.988171 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-serving-cert\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.994531 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" event={"ID":"6d720972-b11e-48ba-a5ee-9ceef5808130","Type":"ContainerStarted","Data":"99b444066d4731dab5a9d7d2b8a67f4146b35993f1b4e0567635da8b911a52b3"} Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.994602 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" event={"ID":"6d720972-b11e-48ba-a5ee-9ceef5808130","Type":"ContainerStarted","Data":"d0f5679b8a7eb0cc0091e30a1bafc64aa9f1d18dce46c6f36b3268fe73e72a95"} Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.994615 4650 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" event={"ID":"6d720972-b11e-48ba-a5ee-9ceef5808130","Type":"ContainerStarted","Data":"94ce2e7b7c14931b6242b141c557717c281b0abd305a4549ed2d37e5b2ccad41"} Feb 01 07:25:42 crc kubenswrapper[4650]: I0201 07:25:42.997626 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2ca8c50-d0b0-4ad6-beda-ca1722a143bf-serving-cert\") pod \"console-operator-58897d9998-s4bcm\" (UID: \"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf\") " pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.007528 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsx2z\" (UniqueName: \"kubernetes.io/projected/499b8a50-5954-4979-9473-63f0bae378f0-kube-api-access-tsx2z\") pod \"dns-default-jqdv7\" (UID: \"499b8a50-5954-4979-9473-63f0bae378f0\") " pod="openshift-dns/dns-default-jqdv7" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.008599 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-etcd-client\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.016248 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/499b8a50-5954-4979-9473-63f0bae378f0-metrics-tls\") pod \"dns-default-jqdv7\" (UID: \"499b8a50-5954-4979-9473-63f0bae378f0\") " pod="openshift-dns/dns-default-jqdv7" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.022383 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7lmz\" (UniqueName: \"kubernetes.io/projected/50135dde-6b69-4044-b285-0b33b617e7a9-kube-api-access-q7lmz\") pod \"cluster-image-registry-operator-dc59b4c8b-76xnb\" (UID: \"50135dde-6b69-4044-b285-0b33b617e7a9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.036450 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" event={"ID":"776aae02-31ea-4a89-afa0-11a2bd798df2","Type":"ContainerStarted","Data":"f6967a787831d2ac9fd630620834667452ad98bfd014a3773d6ca8c06c962309"} Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.066826 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9jrg\" (UniqueName: \"kubernetes.io/projected/01818dd2-dadc-467f-b2c3-4c14c8ff96c5-kube-api-access-c9jrg\") pod \"etcd-operator-b45778765-q9fms\" (UID: \"01818dd2-dadc-467f-b2c3-4c14c8ff96c5\") " pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.068584 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" event={"ID":"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f","Type":"ContainerStarted","Data":"9998252092d175e3f983b6797608e15c6205612c0004a4b8794ad2e7ff647a6f"} Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.072115 4650 generic.go:334] "Generic (PLEG): container finished" podID="e5471807-9088-4007-b0dc-b68760e76415" 
containerID="6169b70abf87bd8b0583764754a4725bebf85b6b8d219189f40744d2f6c9438b" exitCode=0 Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.072177 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-24q9r" event={"ID":"e5471807-9088-4007-b0dc-b68760e76415","Type":"ContainerDied","Data":"6169b70abf87bd8b0583764754a4725bebf85b6b8d219189f40744d2f6c9438b"} Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.074409 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/50135dde-6b69-4044-b285-0b33b617e7a9-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-76xnb\" (UID: \"50135dde-6b69-4044-b285-0b33b617e7a9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.075644 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-nnskb" event={"ID":"77633ea9-7071-47d2-a623-708e4cd5b99f","Type":"ContainerStarted","Data":"4e92e00486574d77eb576161c3b963fdb523433e5e155bcd4ab584e8e2003f30"} Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.079481 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" event={"ID":"cdb1c5f5-c67b-45d5-af23-2168beaf2cae","Type":"ContainerStarted","Data":"ec0d8cac756cdfea9d313a7de0b7a9d9e2e0d7d79c9319f869449f8f28b83581"} Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.087076 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.087452 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dncf5\" (UniqueName: \"kubernetes.io/projected/0d41d4aa-0c8b-4d30-85fb-870259c021ae-kube-api-access-dncf5\") pod \"package-server-manager-789f6589d5-b7kgx\" (UID: \"0d41d4aa-0c8b-4d30-85fb-870259c021ae\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.087556 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0d352c71-f363-4f44-abba-d535c50f6497-service-ca-bundle\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.087657 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/11275769-14c7-40e0-a0d4-378617ac97d3-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-8zlcr\" (UID: \"11275769-14c7-40e0-a0d4-378617ac97d3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.087753 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9bfd\" (UniqueName: \"kubernetes.io/projected/11275769-14c7-40e0-a0d4-378617ac97d3-kube-api-access-t9bfd\") pod 
\"machine-config-controller-84d6567774-8zlcr\" (UID: \"11275769-14c7-40e0-a0d4-378617ac97d3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.087848 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b0d96618-e665-41d9-921f-83167ba4f6f6-auth-proxy-config\") pod \"machine-config-operator-74547568cd-gpqvq\" (UID: \"b0d96618-e665-41d9-921f-83167ba4f6f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.087936 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvbpm\" (UniqueName: \"kubernetes.io/projected/671ece21-a48b-4e84-bc67-7e34bbe90a6b-kube-api-access-xvbpm\") pod \"multus-admission-controller-857f4d67dd-sl4ph\" (UID: \"671ece21-a48b-4e84-bc67-7e34bbe90a6b\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sl4ph" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.088074 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/078b45eb-853f-4560-888a-8ba2928a847b-cert\") pod \"ingress-canary-jr9qf\" (UID: \"078b45eb-853f-4560-888a-8ba2928a847b\") " pod="openshift-ingress-canary/ingress-canary-jr9qf" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.088176 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d4fec23f-d6a0-4975-b969-bc01b7dab696-profile-collector-cert\") pod \"olm-operator-6b444d44fb-hfv5r\" (UID: \"d4fec23f-d6a0-4975-b969-bc01b7dab696\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.088263 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/0d352c71-f363-4f44-abba-d535c50f6497-stats-auth\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.088346 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9lrf\" (UniqueName: \"kubernetes.io/projected/9241d1f3-454b-4448-883d-221a5274e596-kube-api-access-d9lrf\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.088442 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e3b2ad4a-8a06-467b-a83a-f203dd935f9f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-c9v8k\" (UID: \"e3b2ad4a-8a06-467b-a83a-f203dd935f9f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.088563 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adc774cf-13b6-49b9-a5a7-a3816d6042a9-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-g8mcc\" (UID: \"adc774cf-13b6-49b9-a5a7-a3816d6042a9\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" Feb 01 07:25:43 crc kubenswrapper[4650]: E0201 07:25:43.088750 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:43.588717124 +0000 UTC m=+142.311815369 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.090003 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/11275769-14c7-40e0-a0d4-378617ac97d3-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-8zlcr\" (UID: \"11275769-14c7-40e0-a0d4-378617ac97d3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.092434 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b0d96618-e665-41d9-921f-83167ba4f6f6-auth-proxy-config\") pod \"machine-config-operator-74547568cd-gpqvq\" (UID: \"b0d96618-e665-41d9-921f-83167ba4f6f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.100959 4650 generic.go:334] "Generic (PLEG): container finished" podID="a09c591f-ba12-43d6-98bf-003df4aa5813" containerID="f5e4d4661a5530c1e2a28ce8a48261bb72e66b45a8fe921db262f6ea1ff252a8" exitCode=0 Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.101064 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" event={"ID":"a09c591f-ba12-43d6-98bf-003df4aa5813","Type":"ContainerDied","Data":"f5e4d4661a5530c1e2a28ce8a48261bb72e66b45a8fe921db262f6ea1ff252a8"} Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.101095 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" event={"ID":"a09c591f-ba12-43d6-98bf-003df4aa5813","Type":"ContainerStarted","Data":"386f605bcc06d770319713b3c17f8664e2870d081d0cc3509191cf20d5f0b22b"} Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.101771 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/badf229a-c84d-41c1-b283-e473e15ed647-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-85g2d\" (UID: \"badf229a-c84d-41c1-b283-e473e15ed647\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.102860 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d4fec23f-d6a0-4975-b969-bc01b7dab696-profile-collector-cert\") pod \"olm-operator-6b444d44fb-hfv5r\" (UID: 
\"d4fec23f-d6a0-4975-b969-bc01b7dab696\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.103386 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0d352c71-f363-4f44-abba-d535c50f6497-service-ca-bundle\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.103537 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b990a3f9-509d-405d-8ae1-cdcb4f752f93-node-bootstrap-token\") pod \"machine-config-server-f5d8l\" (UID: \"b990a3f9-509d-405d-8ae1-cdcb4f752f93\") " pod="openshift-machine-config-operator/machine-config-server-f5d8l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.103695 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/27e56eb9-20b6-40e2-bae7-64379aebe1ad-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fthm2\" (UID: \"27e56eb9-20b6-40e2-bae7-64379aebe1ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.103815 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8nw8\" (UniqueName: \"kubernetes.io/projected/784dfbaa-4863-45d9-ac03-05d772fcb779-kube-api-access-v8nw8\") pod \"marketplace-operator-79b997595-tprml\" (UID: \"784dfbaa-4863-45d9-ac03-05d772fcb779\") " pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.103886 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jct7\" (UniqueName: \"kubernetes.io/projected/0d352c71-f363-4f44-abba-d535c50f6497-kube-api-access-9jct7\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.103970 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/57fbc47e-a1c0-44f5-abd5-40a696f37a37-webhook-cert\") pod \"packageserver-d55dfcdfc-5tcfm\" (UID: \"57fbc47e-a1c0-44f5-abd5-40a696f37a37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.104914 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2a139c37-a580-476f-a35b-e5daba038dbc-config-volume\") pod \"collect-profiles-29498835-4crxh\" (UID: \"2a139c37-a580-476f-a35b-e5daba038dbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.105012 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/57fbc47e-a1c0-44f5-abd5-40a696f37a37-apiservice-cert\") pod \"packageserver-d55dfcdfc-5tcfm\" (UID: \"57fbc47e-a1c0-44f5-abd5-40a696f37a37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 
07:25:43.105137 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/11275769-14c7-40e0-a0d4-378617ac97d3-proxy-tls\") pod \"machine-config-controller-84d6567774-8zlcr\" (UID: \"11275769-14c7-40e0-a0d4-378617ac97d3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.105217 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g47bf\" (UniqueName: \"kubernetes.io/projected/bee30637-b353-4aab-a8b6-9e26aa6862c4-kube-api-access-g47bf\") pod \"service-ca-operator-777779d784-qh4nl\" (UID: \"bee30637-b353-4aab-a8b6-9e26aa6862c4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.105326 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86mds\" (UniqueName: \"kubernetes.io/projected/858b8a5b-de8c-4e6d-bdd3-834e1249e731-kube-api-access-86mds\") pod \"catalog-operator-68c6474976-28b2s\" (UID: \"858b8a5b-de8c-4e6d-bdd3-834e1249e731\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.105426 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a3c2c4c5-34a2-46e8-a551-d6e171a10dd0-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-zcd4l\" (UID: \"a3c2c4c5-34a2-46e8-a551-d6e171a10dd0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.105511 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-plugins-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.105592 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b0d96618-e665-41d9-921f-83167ba4f6f6-proxy-tls\") pod \"machine-config-operator-74547568cd-gpqvq\" (UID: \"b0d96618-e665-41d9-921f-83167ba4f6f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.105672 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpbm5\" (UniqueName: \"kubernetes.io/projected/e3b2ad4a-8a06-467b-a83a-f203dd935f9f-kube-api-access-lpbm5\") pod \"control-plane-machine-set-operator-78cbb6b69f-c9v8k\" (UID: \"e3b2ad4a-8a06-467b-a83a-f203dd935f9f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.105750 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/0d41d4aa-0c8b-4d30-85fb-870259c021ae-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-b7kgx\" (UID: \"0d41d4aa-0c8b-4d30-85fb-870259c021ae\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.105830 4650 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-cvsq6\" (UniqueName: \"kubernetes.io/projected/078b45eb-853f-4560-888a-8ba2928a847b-kube-api-access-cvsq6\") pod \"ingress-canary-jr9qf\" (UID: \"078b45eb-853f-4560-888a-8ba2928a847b\") " pod="openshift-ingress-canary/ingress-canary-jr9qf" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.105900 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-mountpoint-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.105970 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpzfl\" (UniqueName: \"kubernetes.io/projected/d4fec23f-d6a0-4975-b969-bc01b7dab696-kube-api-access-kpzfl\") pod \"olm-operator-6b444d44fb-hfv5r\" (UID: \"d4fec23f-d6a0-4975-b969-bc01b7dab696\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.106058 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0d352c71-f363-4f44-abba-d535c50f6497-metrics-certs\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.106145 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/0a636f07-7f37-4531-a48c-4851172534e9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-z9whs\" (UID: \"0a636f07-7f37-4531-a48c-4851172534e9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.106217 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-socket-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.106290 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/858b8a5b-de8c-4e6d-bdd3-834e1249e731-profile-collector-cert\") pod \"catalog-operator-68c6474976-28b2s\" (UID: \"858b8a5b-de8c-4e6d-bdd3-834e1249e731\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.106359 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b990a3f9-509d-405d-8ae1-cdcb4f752f93-certs\") pod \"machine-config-server-f5d8l\" (UID: \"b990a3f9-509d-405d-8ae1-cdcb4f752f93\") " pod="openshift-machine-config-operator/machine-config-server-f5d8l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.106462 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27e56eb9-20b6-40e2-bae7-64379aebe1ad-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fthm2\" (UID: \"27e56eb9-20b6-40e2-bae7-64379aebe1ad\") " 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.106553 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/858b8a5b-de8c-4e6d-bdd3-834e1249e731-srv-cert\") pod \"catalog-operator-68c6474976-28b2s\" (UID: \"858b8a5b-de8c-4e6d-bdd3-834e1249e731\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.116663 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/01f92651-6a98-481d-8dc6-041103fc10d4-signing-key\") pod \"service-ca-9c57cc56f-s5qql\" (UID: \"01f92651-6a98-481d-8dc6-041103fc10d4\") " pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.116715 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hm7cw\" (UniqueName: \"kubernetes.io/projected/0a636f07-7f37-4531-a48c-4851172534e9-kube-api-access-hm7cw\") pod \"cluster-samples-operator-665b6dd947-z9whs\" (UID: \"0a636f07-7f37-4531-a48c-4851172534e9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.116747 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8262\" (UniqueName: \"kubernetes.io/projected/b0d96618-e665-41d9-921f-83167ba4f6f6-kube-api-access-b8262\") pod \"machine-config-operator-74547568cd-gpqvq\" (UID: \"b0d96618-e665-41d9-921f-83167ba4f6f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.116774 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5v7c\" (UniqueName: \"kubernetes.io/projected/adc774cf-13b6-49b9-a5a7-a3816d6042a9-kube-api-access-q5v7c\") pod \"kube-storage-version-migrator-operator-b67b599dd-g8mcc\" (UID: \"adc774cf-13b6-49b9-a5a7-a3816d6042a9\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.116798 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/784dfbaa-4863-45d9-ac03-05d772fcb779-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-tprml\" (UID: \"784dfbaa-4863-45d9-ac03-05d772fcb779\") " pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.116821 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6blmp\" (UniqueName: \"kubernetes.io/projected/b990a3f9-509d-405d-8ae1-cdcb4f752f93-kube-api-access-6blmp\") pod \"machine-config-server-f5d8l\" (UID: \"b990a3f9-509d-405d-8ae1-cdcb4f752f93\") " pod="openshift-machine-config-operator/machine-config-server-f5d8l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.116849 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b0d96618-e665-41d9-921f-83167ba4f6f6-images\") pod \"machine-config-operator-74547568cd-gpqvq\" (UID: \"b0d96618-e665-41d9-921f-83167ba4f6f6\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.116872 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-csi-data-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.116896 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/784dfbaa-4863-45d9-ac03-05d772fcb779-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-tprml\" (UID: \"784dfbaa-4863-45d9-ac03-05d772fcb779\") " pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.116928 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/671ece21-a48b-4e84-bc67-7e34bbe90a6b-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-sl4ph\" (UID: \"671ece21-a48b-4e84-bc67-7e34bbe90a6b\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sl4ph" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.116950 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-registration-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.116995 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/0d352c71-f363-4f44-abba-d535c50f6497-default-certificate\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.117042 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3c2c4c5-34a2-46e8-a551-d6e171a10dd0-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-zcd4l\" (UID: \"a3c2c4c5-34a2-46e8-a551-d6e171a10dd0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.117074 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adc774cf-13b6-49b9-a5a7-a3816d6042a9-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-g8mcc\" (UID: \"adc774cf-13b6-49b9-a5a7-a3816d6042a9\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.117099 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/01f92651-6a98-481d-8dc6-041103fc10d4-signing-cabundle\") pod \"service-ca-9c57cc56f-s5qql\" (UID: \"01f92651-6a98-481d-8dc6-041103fc10d4\") " pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.117124 4650 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-cfqdk\" (UniqueName: \"kubernetes.io/projected/57fbc47e-a1c0-44f5-abd5-40a696f37a37-kube-api-access-cfqdk\") pod \"packageserver-d55dfcdfc-5tcfm\" (UID: \"57fbc47e-a1c0-44f5-abd5-40a696f37a37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.117163 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3c2c4c5-34a2-46e8-a551-d6e171a10dd0-config\") pod \"kube-apiserver-operator-766d6c64bb-zcd4l\" (UID: \"a3c2c4c5-34a2-46e8-a551-d6e171a10dd0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.117187 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bee30637-b353-4aab-a8b6-9e26aa6862c4-serving-cert\") pod \"service-ca-operator-777779d784-qh4nl\" (UID: \"bee30637-b353-4aab-a8b6-9e26aa6862c4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.117214 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27e56eb9-20b6-40e2-bae7-64379aebe1ad-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fthm2\" (UID: \"27e56eb9-20b6-40e2-bae7-64379aebe1ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.117259 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/57fbc47e-a1c0-44f5-abd5-40a696f37a37-tmpfs\") pod \"packageserver-d55dfcdfc-5tcfm\" (UID: \"57fbc47e-a1c0-44f5-abd5-40a696f37a37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.117283 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2a139c37-a580-476f-a35b-e5daba038dbc-secret-volume\") pod \"collect-profiles-29498835-4crxh\" (UID: \"2a139c37-a580-476f-a35b-e5daba038dbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.117305 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bee30637-b353-4aab-a8b6-9e26aa6862c4-config\") pod \"service-ca-operator-777779d784-qh4nl\" (UID: \"bee30637-b353-4aab-a8b6-9e26aa6862c4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.118620 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2a139c37-a580-476f-a35b-e5daba038dbc-config-volume\") pod \"collect-profiles-29498835-4crxh\" (UID: \"2a139c37-a580-476f-a35b-e5daba038dbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.119170 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-socket-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: 
\"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.111315 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b990a3f9-509d-405d-8ae1-cdcb4f752f93-node-bootstrap-token\") pod \"machine-config-server-f5d8l\" (UID: \"b990a3f9-509d-405d-8ae1-cdcb4f752f93\") " pod="openshift-machine-config-operator/machine-config-server-f5d8l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.107619 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-r65td" event={"ID":"416589cc-479e-45e0-8fad-2ccd30115769","Type":"ContainerStarted","Data":"e79a2e0b314e56079798feb15955d302ce60a9c18553879678c3570368a0c00f"} Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.124084 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-plugins-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.124661 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-mountpoint-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.126422 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b0d96618-e665-41d9-921f-83167ba4f6f6-images\") pod \"machine-config-operator-74547568cd-gpqvq\" (UID: \"b0d96618-e665-41d9-921f-83167ba4f6f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.127449 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/784dfbaa-4863-45d9-ac03-05d772fcb779-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-tprml\" (UID: \"784dfbaa-4863-45d9-ac03-05d772fcb779\") " pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.127948 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27e56eb9-20b6-40e2-bae7-64379aebe1ad-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fthm2\" (UID: \"27e56eb9-20b6-40e2-bae7-64379aebe1ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.128123 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-xfg9f" event={"ID":"169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4","Type":"ContainerStarted","Data":"ba677d7ceace435cdac7fd53ef1b33dbfb11426da240ffa9c2f833b86cfd53e2"} Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.128181 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-xfg9f" event={"ID":"169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4","Type":"ContainerStarted","Data":"04cb5fdfe20d1d0655058940580f44c0c8d49f8a7b9addcecb106ebb06a188fc"} Feb 01 07:25:43 crc 
kubenswrapper[4650]: I0201 07:25:43.130229 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-xfg9f" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.136108 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/11275769-14c7-40e0-a0d4-378617ac97d3-proxy-tls\") pod \"machine-config-controller-84d6567774-8zlcr\" (UID: \"11275769-14c7-40e0-a0d4-378617ac97d3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.136231 4650 patch_prober.go:28] interesting pod/downloads-7954f5f757-xfg9f container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.136284 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xfg9f" podUID="169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.138621 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bee30637-b353-4aab-a8b6-9e26aa6862c4-config\") pod \"service-ca-operator-777779d784-qh4nl\" (UID: \"bee30637-b353-4aab-a8b6-9e26aa6862c4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.138713 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67v7g\" (UniqueName: \"kubernetes.io/projected/2a139c37-a580-476f-a35b-e5daba038dbc-kube-api-access-67v7g\") pod \"collect-profiles-29498835-4crxh\" (UID: \"2a139c37-a580-476f-a35b-e5daba038dbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.138742 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cl5qp\" (UniqueName: \"kubernetes.io/projected/01f92651-6a98-481d-8dc6-041103fc10d4-kube-api-access-cl5qp\") pod \"service-ca-9c57cc56f-s5qql\" (UID: \"01f92651-6a98-481d-8dc6-041103fc10d4\") " pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.138764 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d4fec23f-d6a0-4975-b969-bc01b7dab696-srv-cert\") pod \"olm-operator-6b444d44fb-hfv5r\" (UID: \"d4fec23f-d6a0-4975-b969-bc01b7dab696\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.141822 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3c2c4c5-34a2-46e8-a551-d6e171a10dd0-config\") pod \"kube-apiserver-operator-766d6c64bb-zcd4l\" (UID: \"a3c2c4c5-34a2-46e8-a551-d6e171a10dd0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.149876 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/01f92651-6a98-481d-8dc6-041103fc10d4-signing-cabundle\") pod \"service-ca-9c57cc56f-s5qql\" (UID: \"01f92651-6a98-481d-8dc6-041103fc10d4\") " pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.153642 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" event={"ID":"6568a223-ba53-4690-9378-08b043d9db27","Type":"ContainerStarted","Data":"cfa32a7a59035255d2bac7b3a4321aa5ebeaffdfe40618d493cedb0900f9536b"} Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.154832 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.158725 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adc774cf-13b6-49b9-a5a7-a3816d6042a9-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-g8mcc\" (UID: \"adc774cf-13b6-49b9-a5a7-a3816d6042a9\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.165295 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/784dfbaa-4863-45d9-ac03-05d772fcb779-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-tprml\" (UID: \"784dfbaa-4863-45d9-ac03-05d772fcb779\") " pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.166063 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-csi-data-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.166456 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adc774cf-13b6-49b9-a5a7-a3816d6042a9-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-g8mcc\" (UID: \"adc774cf-13b6-49b9-a5a7-a3816d6042a9\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.166529 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/9241d1f3-454b-4448-883d-221a5274e596-registration-dir\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.167361 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e3b2ad4a-8a06-467b-a83a-f203dd935f9f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-c9v8k\" (UID: \"e3b2ad4a-8a06-467b-a83a-f203dd935f9f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.167824 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/2a139c37-a580-476f-a35b-e5daba038dbc-secret-volume\") pod \"collect-profiles-29498835-4crxh\" (UID: \"2a139c37-a580-476f-a35b-e5daba038dbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.168149 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bee30637-b353-4aab-a8b6-9e26aa6862c4-serving-cert\") pod \"service-ca-operator-777779d784-qh4nl\" (UID: \"bee30637-b353-4aab-a8b6-9e26aa6862c4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.168242 4650 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-5fhkk container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.168287 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" podUID="6568a223-ba53-4690-9378-08b043d9db27" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.168793 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d4fec23f-d6a0-4975-b969-bc01b7dab696-srv-cert\") pod \"olm-operator-6b444d44fb-hfv5r\" (UID: \"d4fec23f-d6a0-4975-b969-bc01b7dab696\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.169098 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm"] Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.169152 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv"] Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.170151 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/0d352c71-f363-4f44-abba-d535c50f6497-default-certificate\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.170709 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrsdq\" (UniqueName: \"kubernetes.io/projected/d2ca8c50-d0b0-4ad6-beda-ca1722a143bf-kube-api-access-hrsdq\") pod \"console-operator-58897d9998-s4bcm\" (UID: \"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf\") " pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.171183 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/858b8a5b-de8c-4e6d-bdd3-834e1249e731-srv-cert\") pod \"catalog-operator-68c6474976-28b2s\" (UID: \"858b8a5b-de8c-4e6d-bdd3-834e1249e731\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.161824 4650 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/078b45eb-853f-4560-888a-8ba2928a847b-cert\") pod \"ingress-canary-jr9qf\" (UID: \"078b45eb-853f-4560-888a-8ba2928a847b\") " pod="openshift-ingress-canary/ingress-canary-jr9qf" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.171818 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/858b8a5b-de8c-4e6d-bdd3-834e1249e731-profile-collector-cert\") pod \"catalog-operator-68c6474976-28b2s\" (UID: \"858b8a5b-de8c-4e6d-bdd3-834e1249e731\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.172843 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/0d352c71-f363-4f44-abba-d535c50f6497-stats-auth\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.173330 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/0d41d4aa-0c8b-4d30-85fb-870259c021ae-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-b7kgx\" (UID: \"0d41d4aa-0c8b-4d30-85fb-870259c021ae\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.177457 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-jqdv7" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.179301 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/671ece21-a48b-4e84-bc67-7e34bbe90a6b-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-sl4ph\" (UID: \"671ece21-a48b-4e84-bc67-7e34bbe90a6b\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sl4ph" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.179576 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b990a3f9-509d-405d-8ae1-cdcb4f752f93-certs\") pod \"machine-config-server-f5d8l\" (UID: \"b990a3f9-509d-405d-8ae1-cdcb4f752f93\") " pod="openshift-machine-config-operator/machine-config-server-f5d8l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.179710 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3c2c4c5-34a2-46e8-a551-d6e171a10dd0-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-zcd4l\" (UID: \"a3c2c4c5-34a2-46e8-a551-d6e171a10dd0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.179936 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27e56eb9-20b6-40e2-bae7-64379aebe1ad-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fthm2\" (UID: \"27e56eb9-20b6-40e2-bae7-64379aebe1ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.184577 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" 
(UniqueName: \"kubernetes.io/secret/0a636f07-7f37-4531-a48c-4851172534e9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-z9whs\" (UID: \"0a636f07-7f37-4531-a48c-4851172534e9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.186044 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvbpm\" (UniqueName: \"kubernetes.io/projected/671ece21-a48b-4e84-bc67-7e34bbe90a6b-kube-api-access-xvbpm\") pod \"multus-admission-controller-857f4d67dd-sl4ph\" (UID: \"671ece21-a48b-4e84-bc67-7e34bbe90a6b\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sl4ph" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.186839 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b0d96618-e665-41d9-921f-83167ba4f6f6-proxy-tls\") pod \"machine-config-operator-74547568cd-gpqvq\" (UID: \"b0d96618-e665-41d9-921f-83167ba4f6f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.190615 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dncf5\" (UniqueName: \"kubernetes.io/projected/0d41d4aa-0c8b-4d30-85fb-870259c021ae-kube-api-access-dncf5\") pod \"package-server-manager-789f6589d5-b7kgx\" (UID: \"0d41d4aa-0c8b-4d30-85fb-870259c021ae\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.194562 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9bfd\" (UniqueName: \"kubernetes.io/projected/11275769-14c7-40e0-a0d4-378617ac97d3-kube-api-access-t9bfd\") pod \"machine-config-controller-84d6567774-8zlcr\" (UID: \"11275769-14c7-40e0-a0d4-378617ac97d3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.210205 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.221178 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9lrf\" (UniqueName: \"kubernetes.io/projected/9241d1f3-454b-4448-883d-221a5274e596-kube-api-access-d9lrf\") pod \"csi-hostpathplugin-nsmmn\" (UID: \"9241d1f3-454b-4448-883d-221a5274e596\") " pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:43 crc kubenswrapper[4650]: W0201 07:25:43.226282 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod138bf70b_dbf6_41a8_8595_6451279e8080.slice/crio-67ab0ce3ba3ec4c1acacc0982e45f28b6a1c137f191caa62999ca8894ceeef65 WatchSource:0}: Error finding container 67ab0ce3ba3ec4c1acacc0982e45f28b6a1c137f191caa62999ca8894ceeef65: Status 404 returned error can't find the container with id 67ab0ce3ba3ec4c1acacc0982e45f28b6a1c137f191caa62999ca8894ceeef65 Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.235994 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8nw8\" (UniqueName: \"kubernetes.io/projected/784dfbaa-4863-45d9-ac03-05d772fcb779-kube-api-access-v8nw8\") pod \"marketplace-operator-79b997595-tprml\" (UID: \"784dfbaa-4863-45d9-ac03-05d772fcb779\") " pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.250096 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.251447 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" Feb 01 07:25:43 crc kubenswrapper[4650]: E0201 07:25:43.253488 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:43.753466902 +0000 UTC m=+142.476565147 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.255669 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jct7\" (UniqueName: \"kubernetes.io/projected/0d352c71-f363-4f44-abba-d535c50f6497-kube-api-access-9jct7\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.271114 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/27e56eb9-20b6-40e2-bae7-64379aebe1ad-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fthm2\" (UID: \"27e56eb9-20b6-40e2-bae7-64379aebe1ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.291438 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpbm5\" (UniqueName: \"kubernetes.io/projected/e3b2ad4a-8a06-467b-a83a-f203dd935f9f-kube-api-access-lpbm5\") pod \"control-plane-machine-set-operator-78cbb6b69f-c9v8k\" (UID: \"e3b2ad4a-8a06-467b-a83a-f203dd935f9f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.295753 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.302926 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.312394 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86mds\" (UniqueName: \"kubernetes.io/projected/858b8a5b-de8c-4e6d-bdd3-834e1249e731-kube-api-access-86mds\") pod \"catalog-operator-68c6474976-28b2s\" (UID: \"858b8a5b-de8c-4e6d-bdd3-834e1249e731\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.323244 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.337296 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g47bf\" (UniqueName: \"kubernetes.io/projected/bee30637-b353-4aab-a8b6-9e26aa6862c4-kube-api-access-g47bf\") pod \"service-ca-operator-777779d784-qh4nl\" (UID: \"bee30637-b353-4aab-a8b6-9e26aa6862c4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.338737 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.367159 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/57fbc47e-a1c0-44f5-abd5-40a696f37a37-tmpfs\") pod \"packageserver-d55dfcdfc-5tcfm\" (UID: \"57fbc47e-a1c0-44f5-abd5-40a696f37a37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.367188 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/01f92651-6a98-481d-8dc6-041103fc10d4-signing-key\") pod \"service-ca-9c57cc56f-s5qql\" (UID: \"01f92651-6a98-481d-8dc6-041103fc10d4\") " pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.368506 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.368814 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.369015 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0d352c71-f363-4f44-abba-d535c50f6497-metrics-certs\") pod \"router-default-5444994796-zkcsv\" (UID: \"0d352c71-f363-4f44-abba-d535c50f6497\") " pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.369295 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:43 crc kubenswrapper[4650]: E0201 07:25:43.369546 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:43.869522957 +0000 UTC m=+142.592621202 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.369740 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.370537 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/57fbc47e-a1c0-44f5-abd5-40a696f37a37-webhook-cert\") pod \"packageserver-d55dfcdfc-5tcfm\" (UID: \"57fbc47e-a1c0-44f5-abd5-40a696f37a37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.370538 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/57fbc47e-a1c0-44f5-abd5-40a696f37a37-apiservice-cert\") pod \"packageserver-d55dfcdfc-5tcfm\" (UID: \"57fbc47e-a1c0-44f5-abd5-40a696f37a37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:43 crc kubenswrapper[4650]: E0201 07:25:43.371200 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:43.871182395 +0000 UTC m=+142.594280640 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.382264 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.389329 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a3c2c4c5-34a2-46e8-a551-d6e171a10dd0-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-zcd4l\" (UID: \"a3c2c4c5-34a2-46e8-a551-d6e171a10dd0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.391439 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvsq6\" (UniqueName: \"kubernetes.io/projected/078b45eb-853f-4560-888a-8ba2928a847b-kube-api-access-cvsq6\") pod \"ingress-canary-jr9qf\" (UID: \"078b45eb-853f-4560-888a-8ba2928a847b\") " pod="openshift-ingress-canary/ingress-canary-jr9qf" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.394464 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpzfl\" (UniqueName: \"kubernetes.io/projected/d4fec23f-d6a0-4975-b969-bc01b7dab696-kube-api-access-kpzfl\") pod \"olm-operator-6b444d44fb-hfv5r\" (UID: \"d4fec23f-d6a0-4975-b969-bc01b7dab696\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.395286 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.410629 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-sl4ph" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.413474 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8262\" (UniqueName: \"kubernetes.io/projected/b0d96618-e665-41d9-921f-83167ba4f6f6-kube-api-access-b8262\") pod \"machine-config-operator-74547568cd-gpqvq\" (UID: \"b0d96618-e665-41d9-921f-83167ba4f6f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.436084 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.437249 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hm7cw\" (UniqueName: \"kubernetes.io/projected/0a636f07-7f37-4531-a48c-4851172534e9-kube-api-access-hm7cw\") pod \"cluster-samples-operator-665b6dd947-z9whs\" (UID: \"0a636f07-7f37-4531-a48c-4851172534e9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.442757 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.450935 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.460241 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfqdk\" (UniqueName: \"kubernetes.io/projected/57fbc47e-a1c0-44f5-abd5-40a696f37a37-kube-api-access-cfqdk\") pod \"packageserver-d55dfcdfc-5tcfm\" (UID: \"57fbc47e-a1c0-44f5-abd5-40a696f37a37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.470350 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:43 crc kubenswrapper[4650]: E0201 07:25:43.470631 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:43.970590617 +0000 UTC m=+142.693688862 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.471525 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:43 crc kubenswrapper[4650]: E0201 07:25:43.471952 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:43.971936135 +0000 UTC m=+142.695034380 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.477320 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5v7c\" (UniqueName: \"kubernetes.io/projected/adc774cf-13b6-49b9-a5a7-a3816d6042a9-kube-api-access-q5v7c\") pod \"kube-storage-version-migrator-operator-b67b599dd-g8mcc\" (UID: \"adc774cf-13b6-49b9-a5a7-a3816d6042a9\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.490672 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-jr9qf" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.495236 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jqdv7"] Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.506916 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.526476 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67v7g\" (UniqueName: \"kubernetes.io/projected/2a139c37-a580-476f-a35b-e5daba038dbc-kube-api-access-67v7g\") pod \"collect-profiles-29498835-4crxh\" (UID: \"2a139c37-a580-476f-a35b-e5daba038dbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.539787 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cl5qp\" (UniqueName: \"kubernetes.io/projected/01f92651-6a98-481d-8dc6-041103fc10d4-kube-api-access-cl5qp\") pod \"service-ca-9c57cc56f-s5qql\" (UID: \"01f92651-6a98-481d-8dc6-041103fc10d4\") " pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" Feb 01 07:25:43 crc kubenswrapper[4650]: W0201 07:25:43.548234 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod499b8a50_5954_4979_9473_63f0bae378f0.slice/crio-96ed225b57f17c925bbe9daba5534e0714c91c38c86b55391ff721406abcd064 WatchSource:0}: Error finding container 96ed225b57f17c925bbe9daba5534e0714c91c38c86b55391ff721406abcd064: Status 404 returned error can't find the container with id 96ed225b57f17c925bbe9daba5534e0714c91c38c86b55391ff721406abcd064 Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.556102 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6blmp\" (UniqueName: \"kubernetes.io/projected/b990a3f9-509d-405d-8ae1-cdcb4f752f93-kube-api-access-6blmp\") pod \"machine-config-server-f5d8l\" (UID: \"b990a3f9-509d-405d-8ae1-cdcb4f752f93\") " pod="openshift-machine-config-operator/machine-config-server-f5d8l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.572543 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:43 crc kubenswrapper[4650]: E0201 07:25:43.572722 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:44.072684676 +0000 UTC m=+142.795782921 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.572905 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:43 crc kubenswrapper[4650]: E0201 07:25:43.573335 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:44.073319974 +0000 UTC m=+142.796418219 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.631402 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.646489 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.671793 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.673548 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:43 crc kubenswrapper[4650]: E0201 07:25:43.673964 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:44.173948121 +0000 UTC m=+142.897046366 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.686541 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.704589 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.727469 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.761871 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.768504 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-f5d8l" Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.780911 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:43 crc kubenswrapper[4650]: E0201 07:25:43.781306 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:44.281288939 +0000 UTC m=+143.004387184 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.873594 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-s4bcm"] Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.885326 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:43 crc kubenswrapper[4650]: E0201 07:25:43.885534 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:44.385494018 +0000 UTC m=+143.108592263 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.885752 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:43 crc kubenswrapper[4650]: E0201 07:25:43.886156 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:44.386138456 +0000 UTC m=+143.109236701 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.973218 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr"] Feb 01 07:25:43 crc kubenswrapper[4650]: I0201 07:25:43.986902 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:43 crc kubenswrapper[4650]: E0201 07:25:43.987251 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:44.487227386 +0000 UTC m=+143.210325631 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.076931 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-sl4ph"] Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.089579 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:44 crc kubenswrapper[4650]: E0201 07:25:44.090174 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:44.590148559 +0000 UTC m=+143.313246804 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.146291 4650 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-5rj7j container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.146409 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" podUID="f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.155040 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2"] Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.169666 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-snf8v" event={"ID":"2298718f-d9f4-4714-acbb-01739d0c7b62","Type":"ContainerStarted","Data":"aadd71fedd3a995669472fc356dc47ffed709dcdf7f190b3f9fc0ed51ecf307c"} Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.173494 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-s4bcm" event={"ID":"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf","Type":"ContainerStarted","Data":"6d863d203ecff9636057dd052b591b31d29e7b88811ff15423b9a357d3498201"} Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.174723 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jqdv7" event={"ID":"499b8a50-5954-4979-9473-63f0bae378f0","Type":"ContainerStarted","Data":"96ed225b57f17c925bbe9daba5534e0714c91c38c86b55391ff721406abcd064"} Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.177450 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jxwb5" event={"ID":"42bb4278-3f91-4824-ba06-22af4099f7e4","Type":"ContainerStarted","Data":"a59c688ead981f4f8cc697de430030ab7b1c09a38083d34dbf0962e9948645bf"} Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.181549 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" event={"ID":"cdb1c5f5-c67b-45d5-af23-2168beaf2cae","Type":"ContainerStarted","Data":"06d5087f8109f613f07ab04773d0ea71a5fa89ff8971de91be214f53d6c64dcd"} Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.190123 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:44 crc kubenswrapper[4650]: E0201 07:25:44.190663 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:44.690639702 +0000 UTC m=+143.413737947 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.190789 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" event={"ID":"a2f10408-e875-4afe-89e3-9c63b8f4b2dc","Type":"ContainerDied","Data":"d7ceb7f3f996fa2205c1c422f099288a619dd51a769cd1dbbd2de20d68407053"} Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.190934 4650 generic.go:334] "Generic (PLEG): container finished" podID="a2f10408-e875-4afe-89e3-9c63b8f4b2dc" containerID="d7ceb7f3f996fa2205c1c422f099288a619dd51a769cd1dbbd2de20d68407053" exitCode=0 Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.201069 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" event={"ID":"138bf70b-dbf6-41a8-8595-6451279e8080","Type":"ContainerStarted","Data":"67ab0ce3ba3ec4c1acacc0982e45f28b6a1c137f191caa62999ca8894ceeef65"} Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.207467 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" event={"ID":"776aae02-31ea-4a89-afa0-11a2bd798df2","Type":"ContainerStarted","Data":"c9fda915a737b5a284cd1f2bd62fbea3c973fc25de26363586053d8f39185040"} Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.208930 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" event={"ID":"acd8745a-8f0b-4589-be83-b3673735409e","Type":"ContainerStarted","Data":"ab1e2febbc6fc7e3fc3f6d0f5b8b59a619cd9964a9ddffbe207c74b280e5ddfb"} Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.215930 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-q9fms"] Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.222194 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" event={"ID":"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f","Type":"ContainerStarted","Data":"dcc40fde928f502d7be3199f6b76e5018f8945577058bafdcb02b81f447a5bfa"} Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.222738 4650 patch_prober.go:28] interesting pod/downloads-7954f5f757-xfg9f container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.222780 4650 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-console/downloads-7954f5f757-xfg9f" podUID="169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.225038 4650 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-5fhkk container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.225156 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" podUID="6568a223-ba53-4690-9378-08b043d9db27" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.248857 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d"] Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.292314 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:44 crc kubenswrapper[4650]: E0201 07:25:44.292861 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:44.792839524 +0000 UTC m=+143.515937759 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.298453 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.393097 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-jr9qf"] Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.393819 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:44 crc kubenswrapper[4650]: E0201 07:25:44.396792 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:44.896767665 +0000 UTC m=+143.619865910 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.470585 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl"] Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.496425 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:44 crc kubenswrapper[4650]: E0201 07:25:44.500582 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:45.000565482 +0000 UTC m=+143.723663727 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.510897 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb"] Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.539161 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-sj7gj" podStartSLOduration=122.539124067 podStartE2EDuration="2m2.539124067s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:44.494134099 +0000 UTC m=+143.217232344" watchObservedRunningTime="2026-02-01 07:25:44.539124067 +0000 UTC m=+143.262222322" Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.561450 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s"] Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.579949 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-xfg9f" podStartSLOduration=122.579917135 podStartE2EDuration="2m2.579917135s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:44.576657082 +0000 UTC m=+143.299755337" watchObservedRunningTime="2026-02-01 07:25:44.579917135 +0000 UTC m=+143.303015400" Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.610429 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:44 crc kubenswrapper[4650]: E0201 07:25:44.610832 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:45.110809142 +0000 UTC m=+143.833907377 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.638536 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-tprml"] Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.640944 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" podStartSLOduration=121.640931897 podStartE2EDuration="2m1.640931897s" podCreationTimestamp="2026-02-01 07:23:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:44.640888146 +0000 UTC m=+143.363986401" watchObservedRunningTime="2026-02-01 07:25:44.640931897 +0000 UTC m=+143.364030142" Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.723746 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:44 crc kubenswrapper[4650]: E0201 07:25:44.725323 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:45.225307273 +0000 UTC m=+143.948405518 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:44 crc kubenswrapper[4650]: W0201 07:25:44.756200 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod784dfbaa_4863_45d9_ac03_05d772fcb779.slice/crio-ab168db22814f3650f614eb96145ae9893bf8a524a72dea59ff15e532706210a WatchSource:0}: Error finding container ab168db22814f3650f614eb96145ae9893bf8a524a72dea59ff15e532706210a: Status 404 returned error can't find the container with id ab168db22814f3650f614eb96145ae9893bf8a524a72dea59ff15e532706210a Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.829857 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:44 crc kubenswrapper[4650]: E0201 07:25:44.830515 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:45.330450598 +0000 UTC m=+144.053548853 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:44 crc kubenswrapper[4650]: I0201 07:25:44.934389 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:44 crc kubenswrapper[4650]: E0201 07:25:44.934790 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:45.43477258 +0000 UTC m=+144.157870825 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.004589 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-snf8v" podStartSLOduration=123.004566992 podStartE2EDuration="2m3.004566992s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:44.992120229 +0000 UTC m=+143.715218474" watchObservedRunningTime="2026-02-01 07:25:45.004566992 +0000 UTC m=+143.727665237" Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.044642 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:45 crc kubenswrapper[4650]: E0201 07:25:45.046499 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:45.546453371 +0000 UTC m=+144.269551656 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.150797 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:45 crc kubenswrapper[4650]: E0201 07:25:45.151363 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:45.651331518 +0000 UTC m=+144.374429763 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.253258 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:45 crc kubenswrapper[4650]: E0201 07:25:45.260160 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:45.760130337 +0000 UTC m=+144.483228592 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.286367 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k"] Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.362079 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:45 crc kubenswrapper[4650]: E0201 07:25:45.362439 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:45.862424482 +0000 UTC m=+144.585522727 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.397878 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nsmmn"] Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.463128 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:45 crc kubenswrapper[4650]: E0201 07:25:45.463534 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:45.963517952 +0000 UTC m=+144.686616197 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.537776 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-tfv5c" podStartSLOduration=123.53775148 podStartE2EDuration="2m3.53775148s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:45.534795086 +0000 UTC m=+144.257893331" watchObservedRunningTime="2026-02-01 07:25:45.53775148 +0000 UTC m=+144.260849715" Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.557199 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jxwb5" event={"ID":"42bb4278-3f91-4824-ba06-22af4099f7e4","Type":"ContainerStarted","Data":"404636e9f575bc59a84a3d4554a93e7730f39aac804f2ea26da67d58c7456a7c"} Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.573217 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:45 crc kubenswrapper[4650]: E0201 07:25:45.573835 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-02-01 07:25:46.073812914 +0000 UTC m=+144.796911159 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.579238 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx"] Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.623296 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-24q9r" event={"ID":"e5471807-9088-4007-b0dc-b68760e76415","Type":"ContainerStarted","Data":"7da74a93e5827b22091393d160dd84c23020e504d01ba6c03dc50bb5bc3050da"} Feb 01 07:25:45 crc kubenswrapper[4650]: W0201 07:25:45.681279 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode3b2ad4a_8a06_467b_a83a_f203dd935f9f.slice/crio-5624cc171bf89efaf882632ecdddca64f1a9fce98e13c08e7e800800ce0db468 WatchSource:0}: Error finding container 5624cc171bf89efaf882632ecdddca64f1a9fce98e13c08e7e800800ce0db468: Status 404 returned error can't find the container with id 5624cc171bf89efaf882632ecdddca64f1a9fce98e13c08e7e800800ce0db468 Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.685783 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:45 crc kubenswrapper[4650]: E0201 07:25:45.689155 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:46.189112898 +0000 UTC m=+144.912211133 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.707536 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" podStartSLOduration=123.70750834 podStartE2EDuration="2m3.70750834s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:45.699976646 +0000 UTC m=+144.423074901" watchObservedRunningTime="2026-02-01 07:25:45.70750834 +0000 UTC m=+144.430606585" Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.763829 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l"] Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.798899 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:45 crc kubenswrapper[4650]: E0201 07:25:45.799457 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:46.29943936 +0000 UTC m=+145.022537605 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.809714 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-nnskb" event={"ID":"77633ea9-7071-47d2-a623-708e4cd5b99f","Type":"ContainerStarted","Data":"65b1e77d46d6d7432076030a3897e01b1ebef90ad7e60922871fcd6b75c06983"} Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.858100 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-sl4ph" event={"ID":"671ece21-a48b-4e84-bc67-7e34bbe90a6b","Type":"ContainerStarted","Data":"1f2232bc01365b008733a2b2b3dcd2117447fc8b7232b4cb2cf269928cb18cc3"} Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.878843 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" event={"ID":"858b8a5b-de8c-4e6d-bdd3-834e1249e731","Type":"ContainerStarted","Data":"f5406570beb5b3d8f2b2302acd38c495a752d535ead367818f8f3df98e428dbf"} Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.885875 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r"] Feb 01 07:25:45 crc kubenswrapper[4650]: I0201 07:25:45.900840 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:45 crc kubenswrapper[4650]: E0201 07:25:45.901332 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:46.401306033 +0000 UTC m=+145.124404288 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:45.993981 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-s5qql"] Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:45.994043 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" event={"ID":"bee30637-b353-4aab-a8b6-9e26aa6862c4","Type":"ContainerStarted","Data":"5df27c29f57f0b2f02eb94ca94d0ab7c4b447d86a59179085813262603d8d5f1"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.025008 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc"] Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.026425 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:46 crc kubenswrapper[4650]: E0201 07:25:46.026888 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:46.526867388 +0000 UTC m=+145.249965633 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.055314 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" event={"ID":"badf229a-c84d-41c1-b283-e473e15ed647","Type":"ContainerStarted","Data":"e1591e171b0f69ac95b978329e29d0df546944bf2ad273945ce77dfe5982a829"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.081663 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" event={"ID":"acd8745a-8f0b-4589-be83-b3673735409e","Type":"ContainerStarted","Data":"5f31243ec566fec6115d412afdc1bc736a3f484df4324e92e40237ce97295be2"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.107016 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh"] Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.141165 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:46 crc kubenswrapper[4650]: E0201 07:25:46.141338 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:46.641313657 +0000 UTC m=+145.364411902 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.141494 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:46 crc kubenswrapper[4650]: E0201 07:25:46.141952 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:46.641932475 +0000 UTC m=+145.365030720 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.206979 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2rsm" podStartSLOduration=124.206959901 podStartE2EDuration="2m4.206959901s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:46.121507765 +0000 UTC m=+144.844606030" watchObservedRunningTime="2026-02-01 07:25:46.206959901 +0000 UTC m=+144.930058136" Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.208773 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs"] Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.242088 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:46 crc kubenswrapper[4650]: E0201 07:25:46.242471 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:46.742455839 +0000 UTC m=+145.465554074 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.242970 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" event={"ID":"6bfa9091-95a1-4eb6-b4bf-d168d2a61a2f","Type":"ContainerStarted","Data":"2990b2d20dc0d6de464e6fdaeea3e3b52c2cb4b509fd80c0653de6b114e409e2"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.247721 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" event={"ID":"27e56eb9-20b6-40e2-bae7-64379aebe1ad","Type":"ContainerStarted","Data":"bc33248cc2fbda3dcca546ddf88ae28ae1d807382e2d6cda78ed2a900ce45195"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.260502 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" event={"ID":"11275769-14c7-40e0-a0d4-378617ac97d3","Type":"ContainerStarted","Data":"cee6ede39dd22ec93bf4332ef7cb06a27d1f18bb59b8732b54b47876d6b8cf3a"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.311838 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rkvl7" podStartSLOduration=124.311809718 podStartE2EDuration="2m4.311809718s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:46.297469581 +0000 UTC m=+145.020567826" watchObservedRunningTime="2026-02-01 07:25:46.311809718 +0000 UTC m=+145.034907963" Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.336276 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" event={"ID":"50135dde-6b69-4044-b285-0b33b617e7a9","Type":"ContainerStarted","Data":"0f3e7dff5b67c862fc630eb7ba7d83d8eda4bf856627164759c7ce059616c469"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.348127 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:46 crc kubenswrapper[4650]: E0201 07:25:46.350198 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:46.850176438 +0000 UTC m=+145.573274683 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.387602 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" event={"ID":"784dfbaa-4863-45d9-ac03-05d772fcb779","Type":"ContainerStarted","Data":"ab168db22814f3650f614eb96145ae9893bf8a524a72dea59ff15e532706210a"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.441486 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq"] Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.449217 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:46 crc kubenswrapper[4650]: E0201 07:25:46.449775 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:46.949754065 +0000 UTC m=+145.672852310 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.456321 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-jr9qf" event={"ID":"078b45eb-853f-4560-888a-8ba2928a847b","Type":"ContainerStarted","Data":"be1ff2784f8177cff4964f7fdd1d0cfa7c9b8c818e9da2457cb44841e1c97e5f"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.500671 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" event={"ID":"01818dd2-dadc-467f-b2c3-4c14c8ff96c5","Type":"ContainerStarted","Data":"b190a22638767e800de6fae5d443aa70ea93ba51a2eadfa55786ce0f85f02007"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.511497 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-jr9qf" podStartSLOduration=6.511474108 podStartE2EDuration="6.511474108s" podCreationTimestamp="2026-02-01 07:25:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:46.510543391 +0000 UTC m=+145.233641636" watchObservedRunningTime="2026-02-01 07:25:46.511474108 +0000 UTC m=+145.234572353" Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.539735 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" event={"ID":"a09c591f-ba12-43d6-98bf-003df4aa5813","Type":"ContainerStarted","Data":"86fc557029ef8690ed0c8907d42240dc243715e237a4d56e5b2f80930af374b5"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.564086 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.569155 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" event={"ID":"138bf70b-dbf6-41a8-8595-6451279e8080","Type":"ContainerStarted","Data":"1a23350e8e5effab53408475b80dc012243c1a7afbf0be04a093b68d6e0c5a85"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.578492 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:46 crc kubenswrapper[4650]: E0201 07:25:46.579043 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:47.079010715 +0000 UTC m=+145.802108960 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.596990 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zkcsv" event={"ID":"0d352c71-f363-4f44-abba-d535c50f6497","Type":"ContainerStarted","Data":"e49f0a82238bf73005f2812494df86e53e8b11fc44d0d8680675b50d07b90166"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.671757 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" podStartSLOduration=124.671735548 podStartE2EDuration="2m4.671735548s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:46.626901025 +0000 UTC m=+145.349999280" watchObservedRunningTime="2026-02-01 07:25:46.671735548 +0000 UTC m=+145.394833793" Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.684164 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:46 crc kubenswrapper[4650]: E0201 07:25:46.684655 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:47.184637694 +0000 UTC m=+145.907735939 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.684778 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:46 crc kubenswrapper[4650]: E0201 07:25:46.685985 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:47.185977622 +0000 UTC m=+145.909075867 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.695478 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm"] Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.720334 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-q4psv" podStartSLOduration=124.72031032699999 podStartE2EDuration="2m4.720310327s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:46.718567728 +0000 UTC m=+145.441665973" watchObservedRunningTime="2026-02-01 07:25:46.720310327 +0000 UTC m=+145.443408572" Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.720392 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-r65td" event={"ID":"416589cc-479e-45e0-8fad-2ccd30115769","Type":"ContainerStarted","Data":"a10e818e58bba69286a8ae0a636d84cdcce22ddde9ed5a9353e50e240b09c324"} Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.723145 4650 patch_prober.go:28] interesting pod/downloads-7954f5f757-xfg9f container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.723181 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xfg9f" podUID="169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.786595 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:46 crc kubenswrapper[4650]: E0201 07:25:46.787017 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:47.286999651 +0000 UTC m=+146.010097896 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.903757 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:46 crc kubenswrapper[4650]: E0201 07:25:46.905761 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:47.405743682 +0000 UTC m=+146.128841927 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:46 crc kubenswrapper[4650]: I0201 07:25:46.962389 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-r65td" podStartSLOduration=124.96237149 podStartE2EDuration="2m4.96237149s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:46.960443035 +0000 UTC m=+145.683541280" watchObservedRunningTime="2026-02-01 07:25:46.96237149 +0000 UTC m=+145.685469725" Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.011920 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:47 crc kubenswrapper[4650]: E0201 07:25:47.012152 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:47.512128863 +0000 UTC m=+146.235227098 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.012336 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:47 crc kubenswrapper[4650]: E0201 07:25:47.012987 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:47.512972457 +0000 UTC m=+146.236070702 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.117274 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:47 crc kubenswrapper[4650]: E0201 07:25:47.117516 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:47.617501535 +0000 UTC m=+146.340599770 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.220199 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:47 crc kubenswrapper[4650]: E0201 07:25:47.220772 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:47.720745426 +0000 UTC m=+146.443843671 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.321306 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:47 crc kubenswrapper[4650]: E0201 07:25:47.321500 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:47.821471136 +0000 UTC m=+146.544569381 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.322233 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:47 crc kubenswrapper[4650]: E0201 07:25:47.322634 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:47.822617899 +0000 UTC m=+146.545716144 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.423134 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:47 crc kubenswrapper[4650]: E0201 07:25:47.423517 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:47.923502803 +0000 UTC m=+146.646601048 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.525100 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:47 crc kubenswrapper[4650]: E0201 07:25:47.525544 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:48.02552882 +0000 UTC m=+146.748627065 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.625960 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:47 crc kubenswrapper[4650]: E0201 07:25:47.626339 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:48.126324392 +0000 UTC m=+146.849422637 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.730985 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:47 crc kubenswrapper[4650]: E0201 07:25:47.731446 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:48.231427996 +0000 UTC m=+146.954526241 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.823070 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" event={"ID":"50135dde-6b69-4044-b285-0b33b617e7a9","Type":"ContainerStarted","Data":"e4f296c47771a648464aeb6755d8cb345e181fea123eea0cb0a09640241bf4b2"} Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.832905 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:47 crc kubenswrapper[4650]: E0201 07:25:47.833370 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:48.33334523 +0000 UTC m=+147.056443475 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.833511 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:47 crc kubenswrapper[4650]: E0201 07:25:47.833885 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:48.333875025 +0000 UTC m=+147.056973270 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.839086 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-f5d8l" event={"ID":"b990a3f9-509d-405d-8ae1-cdcb4f752f93","Type":"ContainerStarted","Data":"8ce7396ec6cd6207a00efbf29bcef77b338f52611faba5776adae13d3a406e36"} Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.840748 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" event={"ID":"adc774cf-13b6-49b9-a5a7-a3816d6042a9","Type":"ContainerStarted","Data":"cc2aa996a8f5fbb0ec59c3be105ed1d7b13fb7f26c5ba3dc87a03ede8dfcd874"} Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.843166 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" event={"ID":"9241d1f3-454b-4448-883d-221a5274e596","Type":"ContainerStarted","Data":"c322ca37f014c1c1c96e451a6ec1992831c1c1fb58b23dbf1c26ca9eace50829"} Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.880101 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jqdv7" event={"ID":"499b8a50-5954-4979-9473-63f0bae378f0","Type":"ContainerStarted","Data":"58efacf043e15f290786ca6ea3d329e389fbfea32117605b01e266ea13114187"} Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.895423 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" event={"ID":"858b8a5b-de8c-4e6d-bdd3-834e1249e731","Type":"ContainerStarted","Data":"fcce23062e0dc90e67b68f878a09f5b44ce5578945a32cfeead9df53100b18cd"} Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.896725 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.908539 4650 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-28b2s container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" start-of-body= Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.908606 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" podUID="858b8a5b-de8c-4e6d-bdd3-834e1249e731" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.934307 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zkcsv" event={"ID":"0d352c71-f363-4f44-abba-d535c50f6497","Type":"ContainerStarted","Data":"345cb31223de0a3ff9c1054a7e698886cd05eb44929bafaab86b3b4ce10d07bf"} Feb 01 07:25:47 crc kubenswrapper[4650]: I0201 07:25:47.935550 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:47 crc kubenswrapper[4650]: E0201 07:25:47.936613 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:48.436596472 +0000 UTC m=+147.159694717 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.004972 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-nnskb" event={"ID":"77633ea9-7071-47d2-a623-708e4cd5b99f","Type":"ContainerStarted","Data":"274831f69a07615bec339a3eec158a91a6a9366817547cfaee1c5c7e78d783ac"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.024358 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" event={"ID":"d4fec23f-d6a0-4975-b969-bc01b7dab696","Type":"ContainerStarted","Data":"ebf5216022eebbae3ece0d0d705826cbe3dac1779b8dc2e1b229b951ad44cfb4"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.025978 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-76xnb" podStartSLOduration=126.025968669 podStartE2EDuration="2m6.025968669s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:47.867756817 +0000 UTC m=+146.590855082" watchObservedRunningTime="2026-02-01 07:25:48.025968669 +0000 UTC m=+146.749066914" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.027100 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" podStartSLOduration=125.027095391 podStartE2EDuration="2m5.027095391s" podCreationTimestamp="2026-02-01 07:23:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:48.024821257 +0000 UTC m=+146.747919502" watchObservedRunningTime="2026-02-01 07:25:48.027095391 +0000 UTC m=+146.750193636" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.038619 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:48 crc kubenswrapper[4650]: E0201 07:25:48.039646 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:48.539628237 +0000 UTC m=+147.262726482 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.041483 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" event={"ID":"a3c2c4c5-34a2-46e8-a551-d6e171a10dd0","Type":"ContainerStarted","Data":"7a0e94d058fd47e12641ebfb473bfb788c976bec8b902f60e7f1375f962a668d"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.059372 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" event={"ID":"b0d96618-e665-41d9-921f-83167ba4f6f6","Type":"ContainerStarted","Data":"50a356d2de07b355ab96dc824be00fd647c80dd6d40ec107d44b36961861ae9f"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.097722 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" event={"ID":"27e56eb9-20b6-40e2-bae7-64379aebe1ad","Type":"ContainerStarted","Data":"366f0f805c12fa4990b93d00a37b090c328a9b9ebefd76288f5ee1d4db1c9872"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.124964 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" event={"ID":"11275769-14c7-40e0-a0d4-378617ac97d3","Type":"ContainerStarted","Data":"13688ecb62bea1e948196b89df236bce742af613442ce97b6822a3a811d46d80"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.143228 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:48 crc kubenswrapper[4650]: E0201 07:25:48.144469 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:48.644444433 +0000 UTC m=+147.367542678 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.155290 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-zkcsv" podStartSLOduration=126.155271811 podStartE2EDuration="2m6.155271811s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:48.089846203 +0000 UTC m=+146.812944448" watchObservedRunningTime="2026-02-01 07:25:48.155271811 +0000 UTC m=+146.878370056" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.157590 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" event={"ID":"784dfbaa-4863-45d9-ac03-05d772fcb779","Type":"ContainerStarted","Data":"9e3cc4ea8853baec1d44b32d5c24b60272d48fb6f0519b0de9b7d567a9410883"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.158293 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.158590 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-nnskb" podStartSLOduration=126.158584765 podStartE2EDuration="2m6.158584765s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:48.156732222 +0000 UTC m=+146.879830467" watchObservedRunningTime="2026-02-01 07:25:48.158584765 +0000 UTC m=+146.881683010" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.173444 4650 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-tprml container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.173534 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" podUID="784dfbaa-4863-45d9-ac03-05d772fcb779" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.174455 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" event={"ID":"01f92651-6a98-481d-8dc6-041103fc10d4","Type":"ContainerStarted","Data":"ec530bdd59de63fd7a1d41551ee7c6939f1295236bbac6942924062bbcdfa3c5"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.179671 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" 
event={"ID":"0d41d4aa-0c8b-4d30-85fb-870259c021ae","Type":"ContainerStarted","Data":"f5e8370073d0e0a685f127b4799dfca306716def9257ecaf2bf168ff17127ef5"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.232697 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fthm2" podStartSLOduration=126.232676659 podStartE2EDuration="2m6.232676659s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:48.229924061 +0000 UTC m=+146.953022306" watchObservedRunningTime="2026-02-01 07:25:48.232676659 +0000 UTC m=+146.955774904" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.244950 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:48 crc kubenswrapper[4650]: E0201 07:25:48.246000 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:48.745976716 +0000 UTC m=+147.469075151 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.251611 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-24q9r" event={"ID":"e5471807-9088-4007-b0dc-b68760e76415","Type":"ContainerStarted","Data":"18c3448ca82e4144e1e39b1aaa4be566d1b95f7a4c35d7010e34a13ba94f708d"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.280440 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k" event={"ID":"e3b2ad4a-8a06-467b-a83a-f203dd935f9f","Type":"ContainerStarted","Data":"5624cc171bf89efaf882632ecdddca64f1a9fce98e13c08e7e800800ce0db468"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.284862 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" podStartSLOduration=126.284838849 podStartE2EDuration="2m6.284838849s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:48.281021861 +0000 UTC m=+147.004120106" watchObservedRunningTime="2026-02-01 07:25:48.284838849 +0000 UTC m=+147.007937094" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.304199 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" 
event={"ID":"bee30637-b353-4aab-a8b6-9e26aa6862c4","Type":"ContainerStarted","Data":"bed4fce3001ea2a70d667650dc906d1dbd790adf7997a3f85ccae535bc037dc4"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.332222 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" event={"ID":"776aae02-31ea-4a89-afa0-11a2bd798df2","Type":"ContainerStarted","Data":"01ac02885e1b0a069f5813af90de0e227a7b097ebefae503e1a59bf004ebb460"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.345941 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-24q9r" podStartSLOduration=126.345919214 podStartE2EDuration="2m6.345919214s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:48.341738505 +0000 UTC m=+147.064836740" watchObservedRunningTime="2026-02-01 07:25:48.345919214 +0000 UTC m=+147.069017459" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.346626 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:48 crc kubenswrapper[4650]: E0201 07:25:48.362195 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:48.862160465 +0000 UTC m=+147.585258710 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.362280 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:48 crc kubenswrapper[4650]: E0201 07:25:48.364557 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:48.864549863 +0000 UTC m=+147.587648108 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.368513 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jxwb5" event={"ID":"42bb4278-3f91-4824-ba06-22af4099f7e4","Type":"ContainerStarted","Data":"7473e9241596e48bf79eae5920b427503c0c7926785dd2596e6f249cd3a9393a"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.379738 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" event={"ID":"57fbc47e-a1c0-44f5-abd5-40a696f37a37","Type":"ContainerStarted","Data":"792906237f4ff7adbbd6c09eaba99bd1ea1eec780206995e135946d297c3e86e"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.403214 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-jr9qf" event={"ID":"078b45eb-853f-4560-888a-8ba2928a847b","Type":"ContainerStarted","Data":"7d7617a3da4e736a82faa603080fa7cad3f548e628d40936e5360497413dba6b"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.405425 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh4nl" podStartSLOduration=125.405412833 podStartE2EDuration="2m5.405412833s" podCreationTimestamp="2026-02-01 07:23:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:48.404429965 +0000 UTC m=+147.127528240" watchObservedRunningTime="2026-02-01 07:25:48.405412833 +0000 UTC m=+147.128511078" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.443754 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r8xbn" podStartSLOduration=126.443734641 podStartE2EDuration="2m6.443734641s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:48.441360324 +0000 UTC m=+147.164458579" watchObservedRunningTime="2026-02-01 07:25:48.443734641 +0000 UTC m=+147.166832906" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.454225 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.454262 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-s4bcm" event={"ID":"d2ca8c50-d0b0-4ad6-beda-ca1722a143bf","Type":"ContainerStarted","Data":"db5678ace1485edf9846c1558de2d428fb73315cb59e12175dfeec2893589631"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.454284 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.463527 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:48 crc kubenswrapper[4650]: E0201 07:25:48.465120 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:48.965098388 +0000 UTC m=+147.688196623 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.471339 4650 patch_prober.go:28] interesting pod/console-operator-58897d9998-s4bcm container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.471438 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-s4bcm" podUID="d2ca8c50-d0b0-4ad6-beda-ca1722a143bf" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.471826 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:25:48 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:25:48 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:25:48 crc kubenswrapper[4650]: healthz check failed Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.472009 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.482112 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" event={"ID":"01818dd2-dadc-467f-b2c3-4c14c8ff96c5","Type":"ContainerStarted","Data":"481c7f076722db17dfca45953b483f15a012ce77eb1e6a97b86d7f97384d8777"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.502338 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-sl4ph" event={"ID":"671ece21-a48b-4e84-bc67-7e34bbe90a6b","Type":"ContainerStarted","Data":"d5747cfc714a9bc7368943f54bd5cd6e9f599c0015ff0fae680c3129c89580d7"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.504565 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs" 
event={"ID":"0a636f07-7f37-4531-a48c-4851172534e9","Type":"ContainerStarted","Data":"670513bbaeea0402c949936dcf2de518057a4955e538aee4c242f46226dc97f2"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.506069 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" event={"ID":"2a139c37-a580-476f-a35b-e5daba038dbc","Type":"ContainerStarted","Data":"85457b39c6bb2154aaa062033e6a295e3ee2f7acbe47b0c7e2f4c938e7ea17f2"} Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.507902 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.517246 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jxwb5" podStartSLOduration=126.517223048 podStartE2EDuration="2m6.517223048s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:48.513583204 +0000 UTC m=+147.236681449" watchObservedRunningTime="2026-02-01 07:25:48.517223048 +0000 UTC m=+147.240321293" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.537456 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.550729 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-s4bcm" podStartSLOduration=126.550714889 podStartE2EDuration="2m6.550714889s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:48.549051251 +0000 UTC m=+147.272149496" watchObservedRunningTime="2026-02-01 07:25:48.550714889 +0000 UTC m=+147.273813134" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.566398 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:48 crc kubenswrapper[4650]: E0201 07:25:48.569081 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.0690592 +0000 UTC m=+147.792157445 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.621912 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-q9fms" podStartSLOduration=126.62189684 podStartE2EDuration="2m6.62189684s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:48.619705798 +0000 UTC m=+147.342804043" watchObservedRunningTime="2026-02-01 07:25:48.62189684 +0000 UTC m=+147.344995085" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.689194 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:48 crc kubenswrapper[4650]: E0201 07:25:48.691448 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.191422124 +0000 UTC m=+147.914520369 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.791537 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:48 crc kubenswrapper[4650]: E0201 07:25:48.792148 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.292118272 +0000 UTC m=+148.015216527 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.894930 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:48 crc kubenswrapper[4650]: E0201 07:25:48.895394 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.395372544 +0000 UTC m=+148.118470789 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.896008 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:48 crc kubenswrapper[4650]: E0201 07:25:48.897768 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.397729841 +0000 UTC m=+148.120828086 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.971289 4650 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-xql62 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.971394 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" podUID="a09c591f-ba12-43d6-98bf-003df4aa5813" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.972153 4650 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-xql62 container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.972282 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" podUID="a09c591f-ba12-43d6-98bf-003df4aa5813" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 01 07:25:48 crc kubenswrapper[4650]: I0201 07:25:48.997851 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:48 crc kubenswrapper[4650]: E0201 07:25:48.998354 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.498333607 +0000 UTC m=+148.221431852 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.101104 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:49 crc kubenswrapper[4650]: E0201 07:25:49.101618 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.601593909 +0000 UTC m=+148.324692154 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: E0201 07:25:49.202957 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.702928866 +0000 UTC m=+148.426027111 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.203016 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.203383 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:49 crc kubenswrapper[4650]: E0201 07:25:49.204252 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.704236773 +0000 UTC m=+148.427335018 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.305860 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:49 crc kubenswrapper[4650]: E0201 07:25:49.307159 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.807134075 +0000 UTC m=+148.530232320 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.408906 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:49 crc kubenswrapper[4650]: E0201 07:25:49.409484 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:49.90945789 +0000 UTC m=+148.632556215 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.461516 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:25:49 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:25:49 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:25:49 crc kubenswrapper[4650]: healthz check failed Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.461610 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:25:49 crc kubenswrapper[4650]: E0201 07:25:49.510571 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.01054234 +0000 UTC m=+148.733640585 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.510637 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.511128 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:49 crc kubenswrapper[4650]: E0201 07:25:49.511708 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.011697373 +0000 UTC m=+148.734795618 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.517539 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" event={"ID":"2a139c37-a580-476f-a35b-e5daba038dbc","Type":"ContainerStarted","Data":"2e9cb87a68209a8c637af3e8b5ed2226512a463b4820c8374a6ceafdb7a4880f"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.523217 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" event={"ID":"11275769-14c7-40e0-a0d4-378617ac97d3","Type":"ContainerStarted","Data":"a95959e7126840e0b34d539617c5313ed4bd275804ecf37e6f3c15e406cfa7dc"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.526278 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" event={"ID":"b0d96618-e665-41d9-921f-83167ba4f6f6","Type":"ContainerStarted","Data":"b065167f5b3b95a8424802888a48a794d57609bc04ec06559e3be71afb18f790"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.526344 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" event={"ID":"b0d96618-e665-41d9-921f-83167ba4f6f6","Type":"ContainerStarted","Data":"9be659d5365cc373790a4bf45705f9a233bb0ccc10b31289592b012d97d9d2bb"} Feb 01 07:25:49 crc kubenswrapper[4650]: 
I0201 07:25:49.528489 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k" event={"ID":"e3b2ad4a-8a06-467b-a83a-f203dd935f9f","Type":"ContainerStarted","Data":"9930f2fac36f539f2458b5cf2a3938e941472a001a7e9027173447d244ad20ed"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.533403 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" event={"ID":"d4fec23f-d6a0-4975-b969-bc01b7dab696","Type":"ContainerStarted","Data":"5c894d22e9a508d65cd15e60ccef27b608639d57c82c4ab8521a230fe765cfdc"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.533855 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.536869 4650 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-hfv5r container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.536939 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" podUID="d4fec23f-d6a0-4975-b969-bc01b7dab696" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.540169 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" event={"ID":"01f92651-6a98-481d-8dc6-041103fc10d4","Type":"ContainerStarted","Data":"92300cc2e2dfb8f518868824f15b2177ae082b2f45530c3b21c47613e083c143"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.545334 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" event={"ID":"57fbc47e-a1c0-44f5-abd5-40a696f37a37","Type":"ContainerStarted","Data":"6b1f876112dc46a14054c25c5637d268ec79813405844494ef112ae79b3956b2"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.545933 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.548131 4650 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-5tcfm container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused" start-of-body= Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.548206 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" podUID="57fbc47e-a1c0-44f5-abd5-40a696f37a37" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.555174 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-sl4ph" 
event={"ID":"671ece21-a48b-4e84-bc67-7e34bbe90a6b","Type":"ContainerStarted","Data":"fa6d627de85ac6d4c490ae3ad2bb71e15c39cff294ec52c7cd85e004f63e1607"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.565714 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" event={"ID":"a2f10408-e875-4afe-89e3-9c63b8f4b2dc","Type":"ContainerStarted","Data":"8915d346aa88ab25dd234819cfc9aaaee43a05e731c8a9545df7f891789eac63"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.570239 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jqdv7" event={"ID":"499b8a50-5954-4979-9473-63f0bae378f0","Type":"ContainerStarted","Data":"6b5170ac7126ffdf35ac9b2f1fed4e2a2001884f9682ead0c55264f626641a4f"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.570318 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-jqdv7" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.575663 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" event={"ID":"badf229a-c84d-41c1-b283-e473e15ed647","Type":"ContainerStarted","Data":"cf5514468250250544f631368aee3f617adceffa1e5f637f4952637af30cc7a9"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.580158 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" event={"ID":"a3c2c4c5-34a2-46e8-a551-d6e171a10dd0","Type":"ContainerStarted","Data":"09d488786bfec4167a4c09a7abf0aa0c1f5ac0058c1a38bafe7fa493589a234d"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.587101 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs" event={"ID":"0a636f07-7f37-4531-a48c-4851172534e9","Type":"ContainerStarted","Data":"732f62775d7a39d657265b740d511f3cf0b5c149c877c7a2985b83caf495d475"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.587176 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs" event={"ID":"0a636f07-7f37-4531-a48c-4851172534e9","Type":"ContainerStarted","Data":"e8950ebbc7b45378940330331b5a4cd52813bedeb32c1c9582a6bdda5fb791ed"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.591367 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" event={"ID":"adc774cf-13b6-49b9-a5a7-a3816d6042a9","Type":"ContainerStarted","Data":"6bb2b07d1b6f5cdcf3f9f5011eea419e8667558d0d7b3179521a1a171742e3aa"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.601279 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" event={"ID":"0d41d4aa-0c8b-4d30-85fb-870259c021ae","Type":"ContainerStarted","Data":"dd32aa8db421bb44789b8482748425cc55306b91fdba667af862cbbb735de274"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.601340 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" event={"ID":"0d41d4aa-0c8b-4d30-85fb-870259c021ae","Type":"ContainerStarted","Data":"6b4b1aef996af4b4c3335bb511ed1777604a179b31924db245c69a2a93310124"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.601743 4650 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.607098 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" event={"ID":"9241d1f3-454b-4448-883d-221a5274e596","Type":"ContainerStarted","Data":"f96ce15d8fef95c8270ae07b7e0f280d8cef2ee067e1f135775bf944c0362f03"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.614079 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:49 crc kubenswrapper[4650]: E0201 07:25:49.614533 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.114500292 +0000 UTC m=+148.837598537 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.617522 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-f5d8l" event={"ID":"b990a3f9-509d-405d-8ae1-cdcb4f752f93","Type":"ContainerStarted","Data":"377713ef3a1567ef62f5f36016473b6fdecaab28d270e6cb052295bd3380a9a6"} Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.618105 4650 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-tprml container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.618248 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" podUID="784dfbaa-4863-45d9-ac03-05d772fcb779" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Feb 01 07:25:49 crc kubenswrapper[4650]: E0201 07:25:49.618791 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.118777214 +0000 UTC m=+148.841875459 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.618902 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.659440 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-28b2s" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.720626 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.721154 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.721218 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:49 crc kubenswrapper[4650]: E0201 07:25:49.722610 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.222588501 +0000 UTC m=+148.945686746 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.748405 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.749425 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.761332 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" podStartSLOduration=127.761310741 podStartE2EDuration="2m7.761310741s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:49.74755022 +0000 UTC m=+148.470648475" watchObservedRunningTime="2026-02-01 07:25:49.761310741 +0000 UTC m=+148.484408986" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.785860 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.823588 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:49 crc kubenswrapper[4650]: E0201 07:25:49.824297 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.324283319 +0000 UTC m=+149.047381564 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.870006 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z9whs" podStartSLOduration=127.869979916 podStartE2EDuration="2m7.869979916s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:49.869289567 +0000 UTC m=+148.592387812" watchObservedRunningTime="2026-02-01 07:25:49.869979916 +0000 UTC m=+148.593078151" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.925361 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:49 crc kubenswrapper[4650]: E0201 07:25:49.925587 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.425546734 +0000 UTC m=+149.148644979 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.925789 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.925902 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.925971 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:49 crc kubenswrapper[4650]: E0201 07:25:49.926222 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.426213813 +0000 UTC m=+149.149312138 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.930496 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.938159 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.976271 4650 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-xql62 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.976391 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" podUID="a09c591f-ba12-43d6-98bf-003df4aa5813" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.979589 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 01 07:25:49 crc kubenswrapper[4650]: I0201 07:25:49.991730 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.028121 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:50 crc kubenswrapper[4650]: E0201 07:25:50.028821 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.528802136 +0000 UTC m=+149.251900381 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.139102 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:50 crc kubenswrapper[4650]: E0201 07:25:50.139528 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.639512609 +0000 UTC m=+149.362610844 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.243726 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:50 crc kubenswrapper[4650]: E0201 07:25:50.244266 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.744233022 +0000 UTC m=+149.467331257 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.244320 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:50 crc kubenswrapper[4650]: E0201 07:25:50.244684 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.744668445 +0000 UTC m=+149.467766690 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.343471 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-s5qql" podStartSLOduration=127.343445799 podStartE2EDuration="2m7.343445799s" podCreationTimestamp="2026-02-01 07:23:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:50.103234189 +0000 UTC m=+148.826332444" watchObservedRunningTime="2026-02-01 07:25:50.343445799 +0000 UTC m=+149.066544044" Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.345696 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:50 crc kubenswrapper[4650]: E0201 07:25:50.345886 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.845860998 +0000 UTC m=+149.568959243 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.345966 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:50 crc kubenswrapper[4650]: E0201 07:25:50.346364 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.846356392 +0000 UTC m=+149.569454637 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.450644 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:50 crc kubenswrapper[4650]: E0201 07:25:50.450920 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.95089425 +0000 UTC m=+149.673992495 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.450986 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:50 crc kubenswrapper[4650]: E0201 07:25:50.451488 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:50.951469487 +0000 UTC m=+149.674567732 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.465296 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:25:50 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:25:50 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:25:50 crc kubenswrapper[4650]: healthz check failed Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.465381 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.525871 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-s4bcm" Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.533876 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9v8k" podStartSLOduration=128.533857586 podStartE2EDuration="2m8.533857586s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:50.341599167 +0000 UTC m=+149.064697412" watchObservedRunningTime="2026-02-01 07:25:50.533857586 +0000 UTC m=+149.256955831" Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.554380 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" 
(UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:50 crc kubenswrapper[4650]: E0201 07:25:50.554944 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:51.054923534 +0000 UTC m=+149.778021769 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.656784 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:50 crc kubenswrapper[4650]: E0201 07:25:50.662344 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:51.162328564 +0000 UTC m=+149.885426809 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.677837 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.757525 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:50 crc kubenswrapper[4650]: E0201 07:25:50.758011 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:51.25798827 +0000 UTC m=+149.981086515 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.867127 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:50 crc kubenswrapper[4650]: E0201 07:25:50.867632 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:51.367608632 +0000 UTC m=+150.090706877 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.877713 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" podStartSLOduration=127.877689309 podStartE2EDuration="2m7.877689309s" podCreationTimestamp="2026-02-01 07:23:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:50.874753885 +0000 UTC m=+149.597852140" watchObservedRunningTime="2026-02-01 07:25:50.877689309 +0000 UTC m=+149.600787554" Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.878176 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" podStartSLOduration=127.878169892 podStartE2EDuration="2m7.878169892s" podCreationTimestamp="2026-02-01 07:23:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:50.534747341 +0000 UTC m=+149.257845586" watchObservedRunningTime="2026-02-01 07:25:50.878169892 +0000 UTC m=+149.601268137" Feb 01 07:25:50 crc kubenswrapper[4650]: I0201 07:25:50.968862 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:50 crc kubenswrapper[4650]: E0201 07:25:50.969496 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:51.469433773 +0000 UTC m=+150.192532028 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.026078 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8zlcr" podStartSLOduration=129.026054671 podStartE2EDuration="2m9.026054671s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:51.025140235 +0000 UTC m=+149.748238480" watchObservedRunningTime="2026-02-01 07:25:51.026054671 +0000 UTC m=+149.749152926" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.038960 4650 csr.go:261] certificate signing request csr-d7v8r is approved, waiting to be issued Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.072036 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:51 crc kubenswrapper[4650]: E0201 07:25:51.072416 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:51.572390597 +0000 UTC m=+150.295488842 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.091482 4650 csr.go:257] certificate signing request csr-d7v8r is issued Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.159517 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-xql62" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.172800 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:51 crc kubenswrapper[4650]: E0201 07:25:51.173233 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:51.673210629 +0000 UTC m=+150.396308864 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.221890 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-sl4ph" podStartSLOduration=129.221849731 podStartE2EDuration="2m9.221849731s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:51.205753073 +0000 UTC m=+149.928851318" watchObservedRunningTime="2026-02-01 07:25:51.221849731 +0000 UTC m=+149.944947976" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.222917 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zcd4l" podStartSLOduration=129.222910671 podStartE2EDuration="2m9.222910671s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:51.091602602 +0000 UTC m=+149.814700857" watchObservedRunningTime="2026-02-01 07:25:51.222910671 +0000 UTC m=+149.946008906" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.274466 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:51 crc kubenswrapper[4650]: E0201 07:25:51.274930 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:51.774904807 +0000 UTC m=+150.498003052 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.376629 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:51 crc kubenswrapper[4650]: E0201 07:25:51.377216 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:51.877195001 +0000 UTC m=+150.600293246 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.379481 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" podStartSLOduration=128.379452245 podStartE2EDuration="2m8.379452245s" podCreationTimestamp="2026-02-01 07:23:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:51.34652225 +0000 UTC m=+150.069620495" watchObservedRunningTime="2026-02-01 07:25:51.379452245 +0000 UTC m=+150.102550480" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.414886 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.414931 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.439141 4650 patch_prober.go:28] interesting pod/apiserver-76f77b778f-24q9r container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Feb 01 07:25:51 crc kubenswrapper[4650]: [+]log ok Feb 01 07:25:51 
crc kubenswrapper[4650]: [+]etcd ok Feb 01 07:25:51 crc kubenswrapper[4650]: [+]poststarthook/start-apiserver-admission-initializer ok Feb 01 07:25:51 crc kubenswrapper[4650]: [+]poststarthook/generic-apiserver-start-informers ok Feb 01 07:25:51 crc kubenswrapper[4650]: [+]poststarthook/max-in-flight-filter ok Feb 01 07:25:51 crc kubenswrapper[4650]: [+]poststarthook/storage-object-count-tracker-hook ok Feb 01 07:25:51 crc kubenswrapper[4650]: [+]poststarthook/image.openshift.io-apiserver-caches ok Feb 01 07:25:51 crc kubenswrapper[4650]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Feb 01 07:25:51 crc kubenswrapper[4650]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Feb 01 07:25:51 crc kubenswrapper[4650]: [+]poststarthook/project.openshift.io-projectcache ok Feb 01 07:25:51 crc kubenswrapper[4650]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Feb 01 07:25:51 crc kubenswrapper[4650]: [+]poststarthook/openshift.io-startinformers ok Feb 01 07:25:51 crc kubenswrapper[4650]: [+]poststarthook/openshift.io-restmapperupdater ok Feb 01 07:25:51 crc kubenswrapper[4650]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Feb 01 07:25:51 crc kubenswrapper[4650]: livez check failed Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.439210 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-24q9r" podUID="e5471807-9088-4007-b0dc-b68760e76415" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.462347 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.465189 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:25:51 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:25:51 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:25:51 crc kubenswrapper[4650]: healthz check failed Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.465232 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.490850 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:51 crc kubenswrapper[4650]: E0201 07:25:51.493860 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:51.993838903 +0000 UTC m=+150.716937148 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.593635 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:51 crc kubenswrapper[4650]: E0201 07:25:51.595998 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:52.095972673 +0000 UTC m=+150.819070918 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.626899 4650 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-5tcfm container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.626966 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" podUID="57fbc47e-a1c0-44f5-abd5-40a696f37a37" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.649860 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zlgfx"] Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.651326 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.682705 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-f5d8l" podStartSLOduration=11.682676095 podStartE2EDuration="11.682676095s" podCreationTimestamp="2026-02-01 07:25:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:51.639440257 +0000 UTC m=+150.362538502" watchObservedRunningTime="2026-02-01 07:25:51.682676095 +0000 UTC m=+150.405774340" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.687060 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"25f0a3fbf94f41d811a160c967d152e57b0de96c46c30768c468c54d000b512b"} Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.697140 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" event={"ID":"9241d1f3-454b-4448-883d-221a5274e596","Type":"ContainerStarted","Data":"5748ccb7d739fa739683484f212d5cc7ce5729eaf4309823e266ba812155e413"} Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.699281 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51dbc0bf-4be1-4dcc-b406-262067016c90-catalog-content\") pod \"community-operators-zlgfx\" (UID: \"51dbc0bf-4be1-4dcc-b406-262067016c90\") " pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.699471 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.699568 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t99mk\" (UniqueName: \"kubernetes.io/projected/51dbc0bf-4be1-4dcc-b406-262067016c90-kube-api-access-t99mk\") pod \"community-operators-zlgfx\" (UID: \"51dbc0bf-4be1-4dcc-b406-262067016c90\") " pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.699691 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51dbc0bf-4be1-4dcc-b406-262067016c90-utilities\") pod \"community-operators-zlgfx\" (UID: \"51dbc0bf-4be1-4dcc-b406-262067016c90\") " pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:25:51 crc kubenswrapper[4650]: E0201 07:25:51.700184 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:52.200169792 +0000 UTC m=+150.923268037 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.700462 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.714382 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kwqn7"] Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.715700 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.733511 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zlgfx"] Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.774235 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kwqn7"] Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.778796 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.797277 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.801572 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.802044 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51dbc0bf-4be1-4dcc-b406-262067016c90-catalog-content\") pod \"community-operators-zlgfx\" (UID: \"51dbc0bf-4be1-4dcc-b406-262067016c90\") " pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.802095 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t99mk\" (UniqueName: \"kubernetes.io/projected/51dbc0bf-4be1-4dcc-b406-262067016c90-kube-api-access-t99mk\") pod \"community-operators-zlgfx\" (UID: \"51dbc0bf-4be1-4dcc-b406-262067016c90\") " pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.802143 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15308cf7-fed5-4bf2-84e9-ff7ea341303f-catalog-content\") pod \"certified-operators-kwqn7\" (UID: \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\") " pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.802235 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mk8hf\" 
(UniqueName: \"kubernetes.io/projected/15308cf7-fed5-4bf2-84e9-ff7ea341303f-kube-api-access-mk8hf\") pod \"certified-operators-kwqn7\" (UID: \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\") " pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.802334 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51dbc0bf-4be1-4dcc-b406-262067016c90-utilities\") pod \"community-operators-zlgfx\" (UID: \"51dbc0bf-4be1-4dcc-b406-262067016c90\") " pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.802454 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15308cf7-fed5-4bf2-84e9-ff7ea341303f-utilities\") pod \"certified-operators-kwqn7\" (UID: \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\") " pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:25:51 crc kubenswrapper[4650]: E0201 07:25:51.803976 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:52.303945898 +0000 UTC m=+151.027044143 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.804575 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51dbc0bf-4be1-4dcc-b406-262067016c90-catalog-content\") pod \"community-operators-zlgfx\" (UID: \"51dbc0bf-4be1-4dcc-b406-262067016c90\") " pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.805079 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51dbc0bf-4be1-4dcc-b406-262067016c90-utilities\") pod \"community-operators-zlgfx\" (UID: \"51dbc0bf-4be1-4dcc-b406-262067016c90\") " pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:25:51 crc kubenswrapper[4650]: W0201 07:25:51.807247 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-fd26cd764c533c3fafe36e1c4e59d3cf6e498b3616306f7cfbea416e1ba3e41d WatchSource:0}: Error finding container fd26cd764c533c3fafe36e1c4e59d3cf6e498b3616306f7cfbea416e1ba3e41d: Status 404 returned error can't find the container with id fd26cd764c533c3fafe36e1c4e59d3cf6e498b3616306f7cfbea416e1ba3e41d Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.863788 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-gpqvq" podStartSLOduration=129.863765017 podStartE2EDuration="2m9.863765017s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:51.842003099 +0000 UTC m=+150.565101344" watchObservedRunningTime="2026-02-01 07:25:51.863765017 +0000 UTC m=+150.586863262" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.864944 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-g8mcc" podStartSLOduration=129.86493569 podStartE2EDuration="2m9.86493569s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:51.778573438 +0000 UTC m=+150.501671683" watchObservedRunningTime="2026-02-01 07:25:51.86493569 +0000 UTC m=+150.588033935" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.902980 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t99mk\" (UniqueName: \"kubernetes.io/projected/51dbc0bf-4be1-4dcc-b406-262067016c90-kube-api-access-t99mk\") pod \"community-operators-zlgfx\" (UID: \"51dbc0bf-4be1-4dcc-b406-262067016c90\") " pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.904441 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15308cf7-fed5-4bf2-84e9-ff7ea341303f-utilities\") pod \"certified-operators-kwqn7\" (UID: \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\") " pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.904513 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.904537 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15308cf7-fed5-4bf2-84e9-ff7ea341303f-catalog-content\") pod \"certified-operators-kwqn7\" (UID: \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\") " pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.904583 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mk8hf\" (UniqueName: \"kubernetes.io/projected/15308cf7-fed5-4bf2-84e9-ff7ea341303f-kube-api-access-mk8hf\") pod \"certified-operators-kwqn7\" (UID: \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\") " pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.905225 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15308cf7-fed5-4bf2-84e9-ff7ea341303f-utilities\") pod \"certified-operators-kwqn7\" (UID: \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\") " pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:25:51 crc kubenswrapper[4650]: E0201 07:25:51.905825 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-02-01 07:25:52.40581062 +0000 UTC m=+151.128908865 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.906229 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15308cf7-fed5-4bf2-84e9-ff7ea341303f-catalog-content\") pod \"certified-operators-kwqn7\" (UID: \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\") " pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.912420 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9b67j"] Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.913630 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.929880 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mk8hf\" (UniqueName: \"kubernetes.io/projected/15308cf7-fed5-4bf2-84e9-ff7ea341303f-kube-api-access-mk8hf\") pod \"certified-operators-kwqn7\" (UID: \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\") " pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:25:51 crc kubenswrapper[4650]: I0201 07:25:51.942605 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-jqdv7" podStartSLOduration=11.942563804 podStartE2EDuration="11.942563804s" podCreationTimestamp="2026-02-01 07:25:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:51.941666288 +0000 UTC m=+150.664764543" watchObservedRunningTime="2026-02-01 07:25:51.942563804 +0000 UTC m=+150.665662049" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.018190 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.018883 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hr7rj\" (UniqueName: \"kubernetes.io/projected/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-kube-api-access-hr7rj\") pod \"community-operators-9b67j\" (UID: \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\") " pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.018923 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-utilities\") pod \"community-operators-9b67j\" (UID: \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\") " pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:25:52 crc kubenswrapper[4650]: 
I0201 07:25:52.018971 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-catalog-content\") pod \"community-operators-9b67j\" (UID: \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\") " pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:25:52 crc kubenswrapper[4650]: E0201 07:25:52.019093 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:52.519072616 +0000 UTC m=+151.242170861 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.020373 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85g2d" podStartSLOduration=130.020361973 podStartE2EDuration="2m10.020361973s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:51.991121193 +0000 UTC m=+150.714219438" watchObservedRunningTime="2026-02-01 07:25:52.020361973 +0000 UTC m=+150.743460218" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.024429 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.056835 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-d4v2s"] Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.058003 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.058049 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.058065 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9b67j"] Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.058840 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.068625 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.079008 4650 patch_prober.go:28] interesting pod/downloads-7954f5f757-xfg9f container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.079117 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-xfg9f" podUID="169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.079513 4650 patch_prober.go:28] interesting pod/downloads-7954f5f757-xfg9f container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.079534 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xfg9f" podUID="169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.093111 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-02-01 07:20:51 +0000 UTC, rotation deadline is 2026-11-07 06:11:26.7673812 +0000 UTC Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.093153 4650 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6694h45m34.67423134s for next certificate rotation Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.118449 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hfv5r" podStartSLOduration=129.118395547 podStartE2EDuration="2m9.118395547s" podCreationTimestamp="2026-02-01 07:23:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:52.087769517 +0000 UTC m=+150.810867782" watchObservedRunningTime="2026-02-01 07:25:52.118395547 +0000 UTC m=+150.841493792" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.119176 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.120137 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ed3c67d-2427-4ee2-950c-0f705023db71-utilities\") pod \"certified-operators-d4v2s\" (UID: \"3ed3c67d-2427-4ee2-950c-0f705023db71\") " pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.120178 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-catalog-content\") pod \"community-operators-9b67j\" (UID: \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\") " pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 
07:25:52.120211 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdxfh\" (UniqueName: \"kubernetes.io/projected/3ed3c67d-2427-4ee2-950c-0f705023db71-kube-api-access-cdxfh\") pod \"certified-operators-d4v2s\" (UID: \"3ed3c67d-2427-4ee2-950c-0f705023db71\") " pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.120340 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hr7rj\" (UniqueName: \"kubernetes.io/projected/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-kube-api-access-hr7rj\") pod \"community-operators-9b67j\" (UID: \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\") " pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.120398 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ed3c67d-2427-4ee2-950c-0f705023db71-catalog-content\") pod \"certified-operators-d4v2s\" (UID: \"3ed3c67d-2427-4ee2-950c-0f705023db71\") " pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.120424 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-utilities\") pod \"community-operators-9b67j\" (UID: \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\") " pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.120456 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:52 crc kubenswrapper[4650]: E0201 07:25:52.120887 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:52.620844776 +0000 UTC m=+151.343943021 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.122386 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-catalog-content\") pod \"community-operators-9b67j\" (UID: \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\") " pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.126645 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-utilities\") pod \"community-operators-9b67j\" (UID: \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\") " pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.148911 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-d4v2s"] Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.221680 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.221902 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ed3c67d-2427-4ee2-950c-0f705023db71-catalog-content\") pod \"certified-operators-d4v2s\" (UID: \"3ed3c67d-2427-4ee2-950c-0f705023db71\") " pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.221965 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ed3c67d-2427-4ee2-950c-0f705023db71-utilities\") pod \"certified-operators-d4v2s\" (UID: \"3ed3c67d-2427-4ee2-950c-0f705023db71\") " pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.221988 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdxfh\" (UniqueName: \"kubernetes.io/projected/3ed3c67d-2427-4ee2-950c-0f705023db71-kube-api-access-cdxfh\") pod \"certified-operators-d4v2s\" (UID: \"3ed3c67d-2427-4ee2-950c-0f705023db71\") " pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:25:52 crc kubenswrapper[4650]: E0201 07:25:52.222497 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:52.722478892 +0000 UTC m=+151.445577137 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.222907 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ed3c67d-2427-4ee2-950c-0f705023db71-catalog-content\") pod \"certified-operators-d4v2s\" (UID: \"3ed3c67d-2427-4ee2-950c-0f705023db71\") " pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.223155 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ed3c67d-2427-4ee2-950c-0f705023db71-utilities\") pod \"certified-operators-d4v2s\" (UID: \"3ed3c67d-2427-4ee2-950c-0f705023db71\") " pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.231320 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hr7rj\" (UniqueName: \"kubernetes.io/projected/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-kube-api-access-hr7rj\") pod \"community-operators-9b67j\" (UID: \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\") " pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.322620 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.322669 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.324515 4650 patch_prober.go:28] interesting pod/console-f9d7485db-snf8v container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.324567 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-snf8v" podUID="2298718f-d9f4-4714-acbb-01739d0c7b62" containerName="console" probeResult="failure" output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.325579 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:52 crc kubenswrapper[4650]: E0201 07:25:52.326015 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:52.82600297 +0000 UTC m=+151.549101205 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.342248 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.426614 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:52 crc kubenswrapper[4650]: E0201 07:25:52.427330 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:52.927312507 +0000 UTC m=+151.650410752 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.494918 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:25:52 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:25:52 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:25:52 crc kubenswrapper[4650]: healthz check failed Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.494981 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.496267 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdxfh\" (UniqueName: \"kubernetes.io/projected/3ed3c67d-2427-4ee2-950c-0f705023db71-kube-api-access-cdxfh\") pod \"certified-operators-d4v2s\" (UID: \"3ed3c67d-2427-4ee2-950c-0f705023db71\") " pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.534106 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:52 crc kubenswrapper[4650]: E0201 07:25:52.535947 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:53.035932441 +0000 UTC m=+151.759030686 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.637620 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:52 crc kubenswrapper[4650]: E0201 07:25:52.638524 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:53.138487173 +0000 UTC m=+151.861585408 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.666827 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5tcfm" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.713705 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"79fa92e0adc39949be5fdeb60dac1a96ab9746bc592bd609f8d9c0dfe785081b"} Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.713772 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"8b762ee090dbe01b636124b41f18fd52565d0adf0c680a58382ba1a71f804986"} Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.718346 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"d469662b0f2f0638c1699e30af7afd92e7684a85626438d7c87de6ab7ce9a7b7"} Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.718528 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 
07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.720528 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"86916efcd2c4768ab2b207b44d12dc6c2a6a9779b7dd27c18c596f3ae3e954c7"} Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.720564 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"fd26cd764c533c3fafe36e1c4e59d3cf6e498b3616306f7cfbea416e1ba3e41d"} Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.727454 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.729106 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-b7vsd" Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.741056 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:52 crc kubenswrapper[4650]: E0201 07:25:52.741703 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:53.241682963 +0000 UTC m=+151.964781208 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.841840 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:52 crc kubenswrapper[4650]: E0201 07:25:52.842233 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:53.342213917 +0000 UTC m=+152.065312162 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:52 crc kubenswrapper[4650]: I0201 07:25:52.944125 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:52 crc kubenswrapper[4650]: E0201 07:25:52.944509 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:53.444493051 +0000 UTC m=+152.167591286 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.044903 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:53 crc kubenswrapper[4650]: E0201 07:25:53.045395 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:53.545365955 +0000 UTC m=+152.268464200 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.045710 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:53 crc kubenswrapper[4650]: E0201 07:25:53.046131 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:53.546121277 +0000 UTC m=+152.269219522 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.150509 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:53 crc kubenswrapper[4650]: E0201 07:25:53.150796 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:53.650782289 +0000 UTC m=+152.373880534 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.251311 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:53 crc kubenswrapper[4650]: E0201 07:25:53.251655 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:53.751641542 +0000 UTC m=+152.474739787 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.361952 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:53 crc kubenswrapper[4650]: E0201 07:25:53.363590 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:53.86355034 +0000 UTC m=+152.586648585 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.363808 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:53 crc kubenswrapper[4650]: E0201 07:25:53.364249 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:53.864236479 +0000 UTC m=+152.587334724 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.378515 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.453893 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.464947 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:53 crc kubenswrapper[4650]: E0201 07:25:53.466585 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:53.966559725 +0000 UTC m=+152.689657980 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.471262 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sfl8l"] Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.472473 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.478506 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:25:53 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:25:53 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:25:53 crc kubenswrapper[4650]: healthz check failed Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.478587 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.489522 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.527739 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sfl8l"] Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.566612 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:53 crc kubenswrapper[4650]: E0201 07:25:53.567182 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:54.06712436 +0000 UTC m=+152.790222605 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.667972 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.668270 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-catalog-content\") pod \"redhat-marketplace-sfl8l\" (UID: \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\") " pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.668387 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-utilities\") pod \"redhat-marketplace-sfl8l\" (UID: \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\") " pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.668408 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mldh\" (UniqueName: \"kubernetes.io/projected/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-kube-api-access-4mldh\") pod \"redhat-marketplace-sfl8l\" (UID: \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\") " pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:25:53 crc kubenswrapper[4650]: E0201 07:25:53.668517 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:54.168496878 +0000 UTC m=+152.891595123 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.670847 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zlgfx"] Feb 01 07:25:53 crc kubenswrapper[4650]: W0201 07:25:53.705063 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51dbc0bf_4be1_4dcc_b406_262067016c90.slice/crio-942cad7f488a4c03104b47aef824aa84f150e040c786c6171382f9bc1880e0f0 WatchSource:0}: Error finding container 942cad7f488a4c03104b47aef824aa84f150e040c786c6171382f9bc1880e0f0: Status 404 returned error can't find the container with id 942cad7f488a4c03104b47aef824aa84f150e040c786c6171382f9bc1880e0f0 Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.769997 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" event={"ID":"9241d1f3-454b-4448-883d-221a5274e596","Type":"ContainerStarted","Data":"52cbff86dd44925046a895f2a42ea2c7d5d21c7c0fb56e7b389f3b7c1a541f4f"} Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.771937 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-utilities\") pod \"redhat-marketplace-sfl8l\" (UID: \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\") " pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.771977 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mldh\" (UniqueName: \"kubernetes.io/projected/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-kube-api-access-4mldh\") pod \"redhat-marketplace-sfl8l\" (UID: \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\") " pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.772001 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-catalog-content\") pod \"redhat-marketplace-sfl8l\" (UID: \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\") " pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.772055 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:53 crc kubenswrapper[4650]: E0201 07:25:53.772429 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:54.272412599 +0000 UTC m=+152.995510844 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.773062 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-utilities\") pod \"redhat-marketplace-sfl8l\" (UID: \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\") " pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.773628 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-catalog-content\") pod \"redhat-marketplace-sfl8l\" (UID: \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\") " pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.788103 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zlgfx" event={"ID":"51dbc0bf-4be1-4dcc-b406-262067016c90","Type":"ContainerStarted","Data":"942cad7f488a4c03104b47aef824aa84f150e040c786c6171382f9bc1880e0f0"} Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.795406 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2f7qv"] Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.796494 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.821989 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mldh\" (UniqueName: \"kubernetes.io/projected/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-kube-api-access-4mldh\") pod \"redhat-marketplace-sfl8l\" (UID: \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\") " pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.861473 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2f7qv"] Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.877710 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:53 crc kubenswrapper[4650]: E0201 07:25:53.877847 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:54.377817732 +0000 UTC m=+153.100915977 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.878314 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.879743 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86213c9a-3227-4329-8a45-83e5b550f4a2-utilities\") pod \"redhat-marketplace-2f7qv\" (UID: \"86213c9a-3227-4329-8a45-83e5b550f4a2\") " pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.879853 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kchjx\" (UniqueName: \"kubernetes.io/projected/86213c9a-3227-4329-8a45-83e5b550f4a2-kube-api-access-kchjx\") pod \"redhat-marketplace-2f7qv\" (UID: \"86213c9a-3227-4329-8a45-83e5b550f4a2\") " pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.879899 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86213c9a-3227-4329-8a45-83e5b550f4a2-catalog-content\") pod \"redhat-marketplace-2f7qv\" (UID: \"86213c9a-3227-4329-8a45-83e5b550f4a2\") " pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:25:53 crc kubenswrapper[4650]: E0201 07:25:53.881287 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:54.38126639 +0000 UTC m=+153.104364635 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.917473 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.981148 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.981309 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86213c9a-3227-4329-8a45-83e5b550f4a2-catalog-content\") pod \"redhat-marketplace-2f7qv\" (UID: \"86213c9a-3227-4329-8a45-83e5b550f4a2\") " pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.981419 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86213c9a-3227-4329-8a45-83e5b550f4a2-utilities\") pod \"redhat-marketplace-2f7qv\" (UID: \"86213c9a-3227-4329-8a45-83e5b550f4a2\") " pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.981453 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kchjx\" (UniqueName: \"kubernetes.io/projected/86213c9a-3227-4329-8a45-83e5b550f4a2-kube-api-access-kchjx\") pod \"redhat-marketplace-2f7qv\" (UID: \"86213c9a-3227-4329-8a45-83e5b550f4a2\") " pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:25:53 crc kubenswrapper[4650]: E0201 07:25:53.981806 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:54.481791344 +0000 UTC m=+153.204889589 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.982223 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86213c9a-3227-4329-8a45-83e5b550f4a2-catalog-content\") pod \"redhat-marketplace-2f7qv\" (UID: \"86213c9a-3227-4329-8a45-83e5b550f4a2\") " pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:25:53 crc kubenswrapper[4650]: I0201 07:25:53.982692 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86213c9a-3227-4329-8a45-83e5b550f4a2-utilities\") pod \"redhat-marketplace-2f7qv\" (UID: \"86213c9a-3227-4329-8a45-83e5b550f4a2\") " pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.034084 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kchjx\" (UniqueName: \"kubernetes.io/projected/86213c9a-3227-4329-8a45-83e5b550f4a2-kube-api-access-kchjx\") pod \"redhat-marketplace-2f7qv\" (UID: \"86213c9a-3227-4329-8a45-83e5b550f4a2\") " pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.082868 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:54 crc kubenswrapper[4650]: E0201 07:25:54.083316 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:54.583299456 +0000 UTC m=+153.306397701 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.151469 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.196802 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:54 crc kubenswrapper[4650]: E0201 07:25:54.197185 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:54.697075836 +0000 UTC m=+153.420174081 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.197335 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:54 crc kubenswrapper[4650]: E0201 07:25:54.197754 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:54.697746915 +0000 UTC m=+153.420845160 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.283559 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kwqn7"] Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.309698 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:54 crc kubenswrapper[4650]: E0201 07:25:54.310207 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:54.810182128 +0000 UTC m=+153.533280373 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.384805 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9b67j"] Feb 01 07:25:54 crc kubenswrapper[4650]: W0201 07:25:54.402235 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6c5c24b4_b4cc_4577_8cbd_c299f6fd5561.slice/crio-b1079126d19cd3c437247dfcb9c2e556e2329b7f2ccf19ddc2e676eb1e8f22c0 WatchSource:0}: Error finding container b1079126d19cd3c437247dfcb9c2e556e2329b7f2ccf19ddc2e676eb1e8f22c0: Status 404 returned error can't find the container with id b1079126d19cd3c437247dfcb9c2e556e2329b7f2ccf19ddc2e676eb1e8f22c0 Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.412234 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:54 crc kubenswrapper[4650]: E0201 07:25:54.412598 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:54.912584455 +0000 UTC m=+153.635682700 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.432361 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sfl8l"] Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.463228 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:25:54 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:25:54 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:25:54 crc kubenswrapper[4650]: healthz check failed Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.463283 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.484576 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-d4v2s"] Feb 01 07:25:54 crc kubenswrapper[4650]: W0201 07:25:54.489447 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda20ec82b_4c7b_41da_9766_3a6d3dbde1c0.slice/crio-2f7433b9e9c5d4e6fab1201421b6f58b9b31b5a412939346675cc0c9f99a0e00 WatchSource:0}: Error finding container 2f7433b9e9c5d4e6fab1201421b6f58b9b31b5a412939346675cc0c9f99a0e00: Status 404 returned error can't find the container with id 2f7433b9e9c5d4e6fab1201421b6f58b9b31b5a412939346675cc0c9f99a0e00 Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.520623 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:54 crc kubenswrapper[4650]: E0201 07:25:54.521110 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:55.021085026 +0000 UTC m=+153.744183271 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.544686 4650 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.616703 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2f7qv"] Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.623485 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:54 crc kubenswrapper[4650]: E0201 07:25:54.624072 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:55.124018669 +0000 UTC m=+153.847116914 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:54 crc kubenswrapper[4650]: W0201 07:25:54.653332 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod86213c9a_3227_4329_8a45_83e5b550f4a2.slice/crio-15928b9a8051d1069220c2c86c2bcaf1705a019243d6d66a1ec21952713bc286 WatchSource:0}: Error finding container 15928b9a8051d1069220c2c86c2bcaf1705a019243d6d66a1ec21952713bc286: Status 404 returned error can't find the container with id 15928b9a8051d1069220c2c86c2bcaf1705a019243d6d66a1ec21952713bc286 Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.725203 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:54 crc kubenswrapper[4650]: E0201 07:25:54.725569 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:55.225545572 +0000 UTC m=+153.948643817 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.780420 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5s2bx"] Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.782616 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.785911 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.804535 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5s2bx"] Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.805396 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kwqn7" event={"ID":"15308cf7-fed5-4bf2-84e9-ff7ea341303f","Type":"ContainerStarted","Data":"a6b708c7aa816d82a2c0815ed12b62c0c3954955a61e931da4fdbecefbff6996"} Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.817155 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" event={"ID":"9241d1f3-454b-4448-883d-221a5274e596","Type":"ContainerStarted","Data":"0a760f5c168a9b6635bb3b517e56e3555fa6c51cc7b7e4f3f0aad204d1537573"} Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.818293 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2f7qv" event={"ID":"86213c9a-3227-4329-8a45-83e5b550f4a2","Type":"ContainerStarted","Data":"15928b9a8051d1069220c2c86c2bcaf1705a019243d6d66a1ec21952713bc286"} Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.819566 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zlgfx" event={"ID":"51dbc0bf-4be1-4dcc-b406-262067016c90","Type":"ContainerStarted","Data":"a9757aefe072de324a8de9acbc90470352fdb77d9542aa3ac937fa354573a0db"} Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.820382 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sfl8l" event={"ID":"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0","Type":"ContainerStarted","Data":"2f7433b9e9c5d4e6fab1201421b6f58b9b31b5a412939346675cc0c9f99a0e00"} Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.823000 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d4v2s" event={"ID":"3ed3c67d-2427-4ee2-950c-0f705023db71","Type":"ContainerStarted","Data":"06639540c0d90b18b4099f051b74f12a187b4a36bd450bd57a12c74519291daa"} Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.823843 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9b67j" event={"ID":"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561","Type":"ContainerStarted","Data":"b1079126d19cd3c437247dfcb9c2e556e2329b7f2ccf19ddc2e676eb1e8f22c0"} Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.831186 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-catalog-content\") pod \"redhat-operators-5s2bx\" (UID: \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\") " pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.831625 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.831674 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kn9pd\" (UniqueName: \"kubernetes.io/projected/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-kube-api-access-kn9pd\") pod \"redhat-operators-5s2bx\" (UID: \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\") " pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.831735 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-utilities\") pod \"redhat-operators-5s2bx\" (UID: \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\") " pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:25:54 crc kubenswrapper[4650]: E0201 07:25:54.832150 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:55.332133868 +0000 UTC m=+154.055232113 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.932849 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:54 crc kubenswrapper[4650]: E0201 07:25:54.933377 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:55.433334222 +0000 UTC m=+154.156432477 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.934794 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-utilities\") pod \"redhat-operators-5s2bx\" (UID: \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\") " pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.934876 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-catalog-content\") pod \"redhat-operators-5s2bx\" (UID: \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\") " pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.934956 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.935044 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kn9pd\" (UniqueName: \"kubernetes.io/projected/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-kube-api-access-kn9pd\") pod \"redhat-operators-5s2bx\" (UID: \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\") " pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.935608 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-catalog-content\") pod \"redhat-operators-5s2bx\" (UID: \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\") " pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:25:54 crc kubenswrapper[4650]: E0201 07:25:54.935863 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:55.435852553 +0000 UTC m=+154.158950808 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.935894 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-utilities\") pod \"redhat-operators-5s2bx\" (UID: \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\") " pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:25:54 crc kubenswrapper[4650]: I0201 07:25:54.960424 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kn9pd\" (UniqueName: \"kubernetes.io/projected/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-kube-api-access-kn9pd\") pod \"redhat-operators-5s2bx\" (UID: \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\") " pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.036516 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:55 crc kubenswrapper[4650]: E0201 07:25:55.036958 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:55.536920423 +0000 UTC m=+154.260018738 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.037055 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:55 crc kubenswrapper[4650]: E0201 07:25:55.037724 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:55.537715025 +0000 UTC m=+154.260813270 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.102398 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.138860 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:55 crc kubenswrapper[4650]: E0201 07:25:55.139285 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-01 07:25:55.639244118 +0000 UTC m=+154.362342363 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.139755 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:55 crc kubenswrapper[4650]: E0201 07:25:55.140232 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-01 07:25:55.640221166 +0000 UTC m=+154.363319411 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-crkwn" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.152697 4650 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-02-01T07:25:54.544726528Z","Handler":null,"Name":""} Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.163500 4650 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.163579 4650 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.171042 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xn7fv"] Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.172157 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.185420 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-jqdv7" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.203158 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xn7fv"] Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.240890 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.241174 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/029eaf1d-b5ae-4719-b2b1-243c9b8850db-utilities\") pod \"redhat-operators-xn7fv\" (UID: \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\") " pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.241207 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxrj2\" (UniqueName: \"kubernetes.io/projected/029eaf1d-b5ae-4719-b2b1-243c9b8850db-kube-api-access-bxrj2\") pod \"redhat-operators-xn7fv\" (UID: \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\") " pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.241232 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/029eaf1d-b5ae-4719-b2b1-243c9b8850db-catalog-content\") pod \"redhat-operators-xn7fv\" (UID: \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\") " 
pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.248371 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.342420 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/029eaf1d-b5ae-4719-b2b1-243c9b8850db-utilities\") pod \"redhat-operators-xn7fv\" (UID: \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\") " pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.342476 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxrj2\" (UniqueName: \"kubernetes.io/projected/029eaf1d-b5ae-4719-b2b1-243c9b8850db-kube-api-access-bxrj2\") pod \"redhat-operators-xn7fv\" (UID: \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\") " pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.342505 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/029eaf1d-b5ae-4719-b2b1-243c9b8850db-catalog-content\") pod \"redhat-operators-xn7fv\" (UID: \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\") " pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.342552 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.343072 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/029eaf1d-b5ae-4719-b2b1-243c9b8850db-utilities\") pod \"redhat-operators-xn7fv\" (UID: \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\") " pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.343206 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/029eaf1d-b5ae-4719-b2b1-243c9b8850db-catalog-content\") pod \"redhat-operators-xn7fv\" (UID: \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\") " pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.381337 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxrj2\" (UniqueName: \"kubernetes.io/projected/029eaf1d-b5ae-4719-b2b1-243c9b8850db-kube-api-access-bxrj2\") pod \"redhat-operators-xn7fv\" (UID: \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\") " pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.460617 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP 
probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:25:55 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:25:55 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:25:55 crc kubenswrapper[4650]: healthz check failed Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.460706 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.489505 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.592894 4650 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.592977 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.616064 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5s2bx"] Feb 01 07:25:55 crc kubenswrapper[4650]: W0201 07:25:55.636140 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod81e5cda9_5051_4b4d_a3fb_3acb0b780e4c.slice/crio-27fbc063cfbfd516a37bb9616f98a014f121f3a1c4097b0317de40ed34ae8b12 WatchSource:0}: Error finding container 27fbc063cfbfd516a37bb9616f98a014f121f3a1c4097b0317de40ed34ae8b12: Status 404 returned error can't find the container with id 27fbc063cfbfd516a37bb9616f98a014f121f3a1c4097b0317de40ed34ae8b12 Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.738565 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-crkwn\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.862864 4650 generic.go:334] "Generic (PLEG): container finished" podID="6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" containerID="1518c7a0cff827656596b0006b19698e07a4cd0fd50470873ea020af8a80abd8" exitCode=0 Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.862964 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9b67j" event={"ID":"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561","Type":"ContainerDied","Data":"1518c7a0cff827656596b0006b19698e07a4cd0fd50470873ea020af8a80abd8"} Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.896404 4650 generic.go:334] "Generic (PLEG): container finished" podID="15308cf7-fed5-4bf2-84e9-ff7ea341303f" 
containerID="e94523ad0dab57b917fbcfb1c4f03d9333732ed0e7cf4986afc9e7d98ee418d4" exitCode=0 Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.896498 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kwqn7" event={"ID":"15308cf7-fed5-4bf2-84e9-ff7ea341303f","Type":"ContainerDied","Data":"e94523ad0dab57b917fbcfb1c4f03d9333732ed0e7cf4986afc9e7d98ee418d4"} Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.896809 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.899602 4650 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.906078 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5s2bx" event={"ID":"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c","Type":"ContainerStarted","Data":"27fbc063cfbfd516a37bb9616f98a014f121f3a1c4097b0317de40ed34ae8b12"} Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.917483 4650 generic.go:334] "Generic (PLEG): container finished" podID="86213c9a-3227-4329-8a45-83e5b550f4a2" containerID="d8cfbbe3b3116fcf04181d1ea290313c5af0322a7f155eba325474867213d54e" exitCode=0 Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.917684 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2f7qv" event={"ID":"86213c9a-3227-4329-8a45-83e5b550f4a2","Type":"ContainerDied","Data":"d8cfbbe3b3116fcf04181d1ea290313c5af0322a7f155eba325474867213d54e"} Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.923612 4650 generic.go:334] "Generic (PLEG): container finished" podID="51dbc0bf-4be1-4dcc-b406-262067016c90" containerID="a9757aefe072de324a8de9acbc90470352fdb77d9542aa3ac937fa354573a0db" exitCode=0 Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.923724 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zlgfx" event={"ID":"51dbc0bf-4be1-4dcc-b406-262067016c90","Type":"ContainerDied","Data":"a9757aefe072de324a8de9acbc90470352fdb77d9542aa3ac937fa354573a0db"} Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.931112 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xn7fv"] Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.946309 4650 generic.go:334] "Generic (PLEG): container finished" podID="a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" containerID="aca37bb8e6683e77dfcd256a724c3ed263a590809d2f7749e9cbf2fb4f7f4170" exitCode=0 Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.946443 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sfl8l" event={"ID":"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0","Type":"ContainerDied","Data":"aca37bb8e6683e77dfcd256a724c3ed263a590809d2f7749e9cbf2fb4f7f4170"} Feb 01 07:25:55 crc kubenswrapper[4650]: W0201 07:25:55.965802 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod029eaf1d_b5ae_4719_b2b1_243c9b8850db.slice/crio-dea343751e02552d9c62b3c238b213e5f52413b6ef3a51dfa7abb26f5cbdba95 WatchSource:0}: Error finding container dea343751e02552d9c62b3c238b213e5f52413b6ef3a51dfa7abb26f5cbdba95: Status 404 returned error can't find the container with id dea343751e02552d9c62b3c238b213e5f52413b6ef3a51dfa7abb26f5cbdba95 Feb 01 07:25:55 
crc kubenswrapper[4650]: I0201 07:25:55.965865 4650 generic.go:334] "Generic (PLEG): container finished" podID="3ed3c67d-2427-4ee2-950c-0f705023db71" containerID="e52c1ebeb06526a4b1e42b57a5803f23baeb965561368a6a232299dad8486056" exitCode=0 Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.986638 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Feb 01 07:25:55 crc kubenswrapper[4650]: I0201 07:25:55.989863 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d4v2s" event={"ID":"3ed3c67d-2427-4ee2-950c-0f705023db71","Type":"ContainerDied","Data":"e52c1ebeb06526a4b1e42b57a5803f23baeb965561368a6a232299dad8486056"} Feb 01 07:25:56 crc kubenswrapper[4650]: I0201 07:25:56.151646 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-nsmmn" podStartSLOduration=16.151627462 podStartE2EDuration="16.151627462s" podCreationTimestamp="2026-02-01 07:25:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:56.14731142 +0000 UTC m=+154.870409665" watchObservedRunningTime="2026-02-01 07:25:56.151627462 +0000 UTC m=+154.874725707" Feb 01 07:25:56 crc kubenswrapper[4650]: I0201 07:25:56.365203 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-crkwn"] Feb 01 07:25:56 crc kubenswrapper[4650]: W0201 07:25:56.372249 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0e132bd_4673_48b5_9362_32781a1f9405.slice/crio-fe41ef17767a15d9d83e8618e1460b20fc2f62816cb4bcee0bbf23f44e0240dd WatchSource:0}: Error finding container fe41ef17767a15d9d83e8618e1460b20fc2f62816cb4bcee0bbf23f44e0240dd: Status 404 returned error can't find the container with id fe41ef17767a15d9d83e8618e1460b20fc2f62816cb4bcee0bbf23f44e0240dd Feb 01 07:25:56 crc kubenswrapper[4650]: I0201 07:25:56.422211 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:56 crc kubenswrapper[4650]: I0201 07:25:56.428298 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-24q9r" Feb 01 07:25:56 crc kubenswrapper[4650]: I0201 07:25:56.478705 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:25:56 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:25:56 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:25:56 crc kubenswrapper[4650]: healthz check failed Feb 01 07:25:56 crc kubenswrapper[4650]: I0201 07:25:56.478786 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.013890 4650 generic.go:334] "Generic (PLEG): container finished" podID="2a139c37-a580-476f-a35b-e5daba038dbc" 
containerID="2e9cb87a68209a8c637af3e8b5ed2226512a463b4820c8374a6ceafdb7a4880f" exitCode=0 Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.014046 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" event={"ID":"2a139c37-a580-476f-a35b-e5daba038dbc","Type":"ContainerDied","Data":"2e9cb87a68209a8c637af3e8b5ed2226512a463b4820c8374a6ceafdb7a4880f"} Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.020274 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" event={"ID":"a0e132bd-4673-48b5-9362-32781a1f9405","Type":"ContainerStarted","Data":"a996c6df9aa5b49d95289d73a27adf66ec6c55896ff2846651c514af9b21ca0d"} Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.020328 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" event={"ID":"a0e132bd-4673-48b5-9362-32781a1f9405","Type":"ContainerStarted","Data":"fe41ef17767a15d9d83e8618e1460b20fc2f62816cb4bcee0bbf23f44e0240dd"} Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.021346 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.027144 4650 generic.go:334] "Generic (PLEG): container finished" podID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" containerID="f546ebacaeb8d69835a1842ad39516997ff51a2061d09a38823b59deb74bf6ad" exitCode=0 Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.027208 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5s2bx" event={"ID":"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c","Type":"ContainerDied","Data":"f546ebacaeb8d69835a1842ad39516997ff51a2061d09a38823b59deb74bf6ad"} Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.073371 4650 generic.go:334] "Generic (PLEG): container finished" podID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" containerID="77664b6f7737ab431aae7358955a646a9e6243dc3a382d1acf6295e89be87de5" exitCode=0 Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.074837 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn7fv" event={"ID":"029eaf1d-b5ae-4719-b2b1-243c9b8850db","Type":"ContainerDied","Data":"77664b6f7737ab431aae7358955a646a9e6243dc3a382d1acf6295e89be87de5"} Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.074877 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn7fv" event={"ID":"029eaf1d-b5ae-4719-b2b1-243c9b8850db","Type":"ContainerStarted","Data":"dea343751e02552d9c62b3c238b213e5f52413b6ef3a51dfa7abb26f5cbdba95"} Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.320419 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" podStartSLOduration=135.320385007 podStartE2EDuration="2m15.320385007s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:25:57.22644124 +0000 UTC m=+155.949539495" watchObservedRunningTime="2026-02-01 07:25:57.320385007 +0000 UTC m=+156.043483252" Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.349261 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Feb 01 07:25:57 crc 
kubenswrapper[4650]: I0201 07:25:57.350071 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.363579 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.363872 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.395136 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.481430 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:25:57 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:25:57 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:25:57 crc kubenswrapper[4650]: healthz check failed Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.481494 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.512811 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/725de557-211c-45a1-95c6-a694f835e02f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"725de557-211c-45a1-95c6-a694f835e02f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.512867 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/725de557-211c-45a1-95c6-a694f835e02f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"725de557-211c-45a1-95c6-a694f835e02f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.646540 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/725de557-211c-45a1-95c6-a694f835e02f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"725de557-211c-45a1-95c6-a694f835e02f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.654077 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/725de557-211c-45a1-95c6-a694f835e02f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"725de557-211c-45a1-95c6-a694f835e02f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.646654 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/725de557-211c-45a1-95c6-a694f835e02f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"725de557-211c-45a1-95c6-a694f835e02f\") " 
pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.695247 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/725de557-211c-45a1-95c6-a694f835e02f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"725de557-211c-45a1-95c6-a694f835e02f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 01 07:25:57 crc kubenswrapper[4650]: I0201 07:25:57.980636 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 01 07:25:58 crc kubenswrapper[4650]: I0201 07:25:58.470594 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:25:58 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:25:58 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:25:58 crc kubenswrapper[4650]: healthz check failed Feb 01 07:25:58 crc kubenswrapper[4650]: I0201 07:25:58.471117 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:25:58 crc kubenswrapper[4650]: I0201 07:25:58.904748 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Feb 01 07:25:58 crc kubenswrapper[4650]: I0201 07:25:58.914235 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" Feb 01 07:25:58 crc kubenswrapper[4650]: W0201 07:25:58.986757 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod725de557_211c_45a1_95c6_a694f835e02f.slice/crio-58a3a70c6c29d35c4634b17a51257fc7489489f894de49b81eb1f2a70ff554f2 WatchSource:0}: Error finding container 58a3a70c6c29d35c4634b17a51257fc7489489f894de49b81eb1f2a70ff554f2: Status 404 returned error can't find the container with id 58a3a70c6c29d35c4634b17a51257fc7489489f894de49b81eb1f2a70ff554f2 Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.018589 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2a139c37-a580-476f-a35b-e5daba038dbc-secret-volume\") pod \"2a139c37-a580-476f-a35b-e5daba038dbc\" (UID: \"2a139c37-a580-476f-a35b-e5daba038dbc\") " Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.018695 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2a139c37-a580-476f-a35b-e5daba038dbc-config-volume\") pod \"2a139c37-a580-476f-a35b-e5daba038dbc\" (UID: \"2a139c37-a580-476f-a35b-e5daba038dbc\") " Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.018738 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67v7g\" (UniqueName: \"kubernetes.io/projected/2a139c37-a580-476f-a35b-e5daba038dbc-kube-api-access-67v7g\") pod \"2a139c37-a580-476f-a35b-e5daba038dbc\" (UID: \"2a139c37-a580-476f-a35b-e5daba038dbc\") " Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.020390 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a139c37-a580-476f-a35b-e5daba038dbc-config-volume" (OuterVolumeSpecName: "config-volume") pod "2a139c37-a580-476f-a35b-e5daba038dbc" (UID: "2a139c37-a580-476f-a35b-e5daba038dbc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.029533 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a139c37-a580-476f-a35b-e5daba038dbc-kube-api-access-67v7g" (OuterVolumeSpecName: "kube-api-access-67v7g") pod "2a139c37-a580-476f-a35b-e5daba038dbc" (UID: "2a139c37-a580-476f-a35b-e5daba038dbc"). InnerVolumeSpecName "kube-api-access-67v7g". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.044271 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a139c37-a580-476f-a35b-e5daba038dbc-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2a139c37-a580-476f-a35b-e5daba038dbc" (UID: "2a139c37-a580-476f-a35b-e5daba038dbc"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.120472 4650 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2a139c37-a580-476f-a35b-e5daba038dbc-config-volume\") on node \"crc\" DevicePath \"\"" Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.120511 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67v7g\" (UniqueName: \"kubernetes.io/projected/2a139c37-a580-476f-a35b-e5daba038dbc-kube-api-access-67v7g\") on node \"crc\" DevicePath \"\"" Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.120523 4650 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2a139c37-a580-476f-a35b-e5daba038dbc-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.277356 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"725de557-211c-45a1-95c6-a694f835e02f","Type":"ContainerStarted","Data":"58a3a70c6c29d35c4634b17a51257fc7489489f894de49b81eb1f2a70ff554f2"} Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.302716 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.304360 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh" event={"ID":"2a139c37-a580-476f-a35b-e5daba038dbc","Type":"ContainerDied","Data":"85457b39c6bb2154aaa062033e6a295e3ee2f7acbe47b0c7e2f4c938e7ea17f2"} Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.304804 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="85457b39c6bb2154aaa062033e6a295e3ee2f7acbe47b0c7e2f4c938e7ea17f2" Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.456365 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:25:59 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:25:59 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:25:59 crc kubenswrapper[4650]: healthz check failed Feb 01 07:25:59 crc kubenswrapper[4650]: I0201 07:25:59.456454 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.019048 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 01 07:26:00 crc kubenswrapper[4650]: E0201 07:26:00.019656 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a139c37-a580-476f-a35b-e5daba038dbc" containerName="collect-profiles" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.019669 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a139c37-a580-476f-a35b-e5daba038dbc" containerName="collect-profiles" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.019784 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a139c37-a580-476f-a35b-e5daba038dbc" 
containerName="collect-profiles" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.020193 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.020273 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.022773 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.023224 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.145896 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/19c50de5-6a0c-4038-8f55-268a21ec47ea-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"19c50de5-6a0c-4038-8f55-268a21ec47ea\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.146069 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/19c50de5-6a0c-4038-8f55-268a21ec47ea-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"19c50de5-6a0c-4038-8f55-268a21ec47ea\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.248190 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/19c50de5-6a0c-4038-8f55-268a21ec47ea-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"19c50de5-6a0c-4038-8f55-268a21ec47ea\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.248268 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/19c50de5-6a0c-4038-8f55-268a21ec47ea-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"19c50de5-6a0c-4038-8f55-268a21ec47ea\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.248354 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/19c50de5-6a0c-4038-8f55-268a21ec47ea-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"19c50de5-6a0c-4038-8f55-268a21ec47ea\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.283963 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/19c50de5-6a0c-4038-8f55-268a21ec47ea-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"19c50de5-6a0c-4038-8f55-268a21ec47ea\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.369286 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.469131 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:26:00 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:26:00 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:26:00 crc kubenswrapper[4650]: healthz check failed Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.470266 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:26:00 crc kubenswrapper[4650]: I0201 07:26:00.932323 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 01 07:26:00 crc kubenswrapper[4650]: W0201 07:26:00.971079 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod19c50de5_6a0c_4038_8f55_268a21ec47ea.slice/crio-9c3b3a1cbacd60daf642a98319baa1cc94183be27699bf6995fec71572a468e8 WatchSource:0}: Error finding container 9c3b3a1cbacd60daf642a98319baa1cc94183be27699bf6995fec71572a468e8: Status 404 returned error can't find the container with id 9c3b3a1cbacd60daf642a98319baa1cc94183be27699bf6995fec71572a468e8 Feb 01 07:26:01 crc kubenswrapper[4650]: I0201 07:26:01.413384 4650 generic.go:334] "Generic (PLEG): container finished" podID="725de557-211c-45a1-95c6-a694f835e02f" containerID="17fc73a9189c4c638cdc48b775b6e7abe2d2823f3566d64947784bb24aecb9a4" exitCode=0 Feb 01 07:26:01 crc kubenswrapper[4650]: I0201 07:26:01.413633 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"725de557-211c-45a1-95c6-a694f835e02f","Type":"ContainerDied","Data":"17fc73a9189c4c638cdc48b775b6e7abe2d2823f3566d64947784bb24aecb9a4"} Feb 01 07:26:01 crc kubenswrapper[4650]: I0201 07:26:01.415419 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"19c50de5-6a0c-4038-8f55-268a21ec47ea","Type":"ContainerStarted","Data":"9c3b3a1cbacd60daf642a98319baa1cc94183be27699bf6995fec71572a468e8"} Feb 01 07:26:01 crc kubenswrapper[4650]: I0201 07:26:01.461431 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:26:01 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:26:01 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:26:01 crc kubenswrapper[4650]: healthz check failed Feb 01 07:26:01 crc kubenswrapper[4650]: I0201 07:26:01.461501 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:26:02 crc kubenswrapper[4650]: I0201 07:26:02.076181 4650 patch_prober.go:28] interesting pod/downloads-7954f5f757-xfg9f container/download-server namespace/openshift-console: Readiness probe status=failure output="Get 
\"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Feb 01 07:26:02 crc kubenswrapper[4650]: I0201 07:26:02.076189 4650 patch_prober.go:28] interesting pod/downloads-7954f5f757-xfg9f container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Feb 01 07:26:02 crc kubenswrapper[4650]: I0201 07:26:02.076809 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xfg9f" podUID="169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Feb 01 07:26:02 crc kubenswrapper[4650]: I0201 07:26:02.076852 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-xfg9f" podUID="169b2f4a-742c-4e6c-a6cb-f2f08bdeccb4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Feb 01 07:26:02 crc kubenswrapper[4650]: I0201 07:26:02.320689 4650 patch_prober.go:28] interesting pod/console-f9d7485db-snf8v container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Feb 01 07:26:02 crc kubenswrapper[4650]: I0201 07:26:02.320764 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-snf8v" podUID="2298718f-d9f4-4714-acbb-01739d0c7b62" containerName="console" probeResult="failure" output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" Feb 01 07:26:02 crc kubenswrapper[4650]: I0201 07:26:02.465238 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:26:02 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:26:02 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:26:02 crc kubenswrapper[4650]: healthz check failed Feb 01 07:26:02 crc kubenswrapper[4650]: I0201 07:26:02.465323 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:26:02 crc kubenswrapper[4650]: I0201 07:26:02.468506 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"19c50de5-6a0c-4038-8f55-268a21ec47ea","Type":"ContainerStarted","Data":"97bb0ceef242d07b7be939ad1acfd1361bbde33689d46d8bd513b9c9654dd65d"} Feb 01 07:26:03 crc kubenswrapper[4650]: I0201 07:26:03.072782 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 01 07:26:03 crc kubenswrapper[4650]: I0201 07:26:03.117417 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/725de557-211c-45a1-95c6-a694f835e02f-kubelet-dir\") pod \"725de557-211c-45a1-95c6-a694f835e02f\" (UID: \"725de557-211c-45a1-95c6-a694f835e02f\") " Feb 01 07:26:03 crc kubenswrapper[4650]: I0201 07:26:03.117583 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/725de557-211c-45a1-95c6-a694f835e02f-kube-api-access\") pod \"725de557-211c-45a1-95c6-a694f835e02f\" (UID: \"725de557-211c-45a1-95c6-a694f835e02f\") " Feb 01 07:26:03 crc kubenswrapper[4650]: I0201 07:26:03.119678 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/725de557-211c-45a1-95c6-a694f835e02f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "725de557-211c-45a1-95c6-a694f835e02f" (UID: "725de557-211c-45a1-95c6-a694f835e02f"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:26:03 crc kubenswrapper[4650]: I0201 07:26:03.130753 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/725de557-211c-45a1-95c6-a694f835e02f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "725de557-211c-45a1-95c6-a694f835e02f" (UID: "725de557-211c-45a1-95c6-a694f835e02f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:26:03 crc kubenswrapper[4650]: I0201 07:26:03.221629 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/725de557-211c-45a1-95c6-a694f835e02f-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:03 crc kubenswrapper[4650]: I0201 07:26:03.221672 4650 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/725de557-211c-45a1-95c6-a694f835e02f-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:03 crc kubenswrapper[4650]: I0201 07:26:03.458835 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:26:03 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:26:03 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:26:03 crc kubenswrapper[4650]: healthz check failed Feb 01 07:26:03 crc kubenswrapper[4650]: I0201 07:26:03.458907 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:26:03 crc kubenswrapper[4650]: I0201 07:26:03.528915 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 01 07:26:03 crc kubenswrapper[4650]: I0201 07:26:03.528920 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"725de557-211c-45a1-95c6-a694f835e02f","Type":"ContainerDied","Data":"58a3a70c6c29d35c4634b17a51257fc7489489f894de49b81eb1f2a70ff554f2"} Feb 01 07:26:03 crc kubenswrapper[4650]: I0201 07:26:03.528992 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="58a3a70c6c29d35c4634b17a51257fc7489489f894de49b81eb1f2a70ff554f2" Feb 01 07:26:03 crc kubenswrapper[4650]: I0201 07:26:03.561929 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=4.561906424 podStartE2EDuration="4.561906424s" podCreationTimestamp="2026-02-01 07:25:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:26:03.552128616 +0000 UTC m=+162.275226861" watchObservedRunningTime="2026-02-01 07:26:03.561906424 +0000 UTC m=+162.285004669" Feb 01 07:26:04 crc kubenswrapper[4650]: I0201 07:26:04.463083 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:26:04 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:26:04 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:26:04 crc kubenswrapper[4650]: healthz check failed Feb 01 07:26:04 crc kubenswrapper[4650]: I0201 07:26:04.463790 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:26:04 crc kubenswrapper[4650]: I0201 07:26:04.583581 4650 generic.go:334] "Generic (PLEG): container finished" podID="19c50de5-6a0c-4038-8f55-268a21ec47ea" containerID="97bb0ceef242d07b7be939ad1acfd1361bbde33689d46d8bd513b9c9654dd65d" exitCode=0 Feb 01 07:26:04 crc kubenswrapper[4650]: I0201 07:26:04.584842 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"19c50de5-6a0c-4038-8f55-268a21ec47ea","Type":"ContainerDied","Data":"97bb0ceef242d07b7be939ad1acfd1361bbde33689d46d8bd513b9c9654dd65d"} Feb 01 07:26:05 crc kubenswrapper[4650]: I0201 07:26:05.455289 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:26:05 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:26:05 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:26:05 crc kubenswrapper[4650]: healthz check failed Feb 01 07:26:05 crc kubenswrapper[4650]: I0201 07:26:05.455648 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:26:05 crc kubenswrapper[4650]: I0201 07:26:05.673073 4650 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:26:05 crc kubenswrapper[4650]: I0201 07:26:05.697726 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4593d40-c6e1-42fa-8c18-053ff31304b3-metrics-certs\") pod \"network-metrics-daemon-jvgsf\" (UID: \"f4593d40-c6e1-42fa-8c18-053ff31304b3\") " pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:26:05 crc kubenswrapper[4650]: I0201 07:26:05.893211 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jvgsf" Feb 01 07:26:06 crc kubenswrapper[4650]: I0201 07:26:06.133899 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 01 07:26:06 crc kubenswrapper[4650]: I0201 07:26:06.288374 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/19c50de5-6a0c-4038-8f55-268a21ec47ea-kubelet-dir\") pod \"19c50de5-6a0c-4038-8f55-268a21ec47ea\" (UID: \"19c50de5-6a0c-4038-8f55-268a21ec47ea\") " Feb 01 07:26:06 crc kubenswrapper[4650]: I0201 07:26:06.288429 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/19c50de5-6a0c-4038-8f55-268a21ec47ea-kube-api-access\") pod \"19c50de5-6a0c-4038-8f55-268a21ec47ea\" (UID: \"19c50de5-6a0c-4038-8f55-268a21ec47ea\") " Feb 01 07:26:06 crc kubenswrapper[4650]: I0201 07:26:06.288912 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/19c50de5-6a0c-4038-8f55-268a21ec47ea-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "19c50de5-6a0c-4038-8f55-268a21ec47ea" (UID: "19c50de5-6a0c-4038-8f55-268a21ec47ea"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:26:06 crc kubenswrapper[4650]: I0201 07:26:06.296735 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19c50de5-6a0c-4038-8f55-268a21ec47ea-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "19c50de5-6a0c-4038-8f55-268a21ec47ea" (UID: "19c50de5-6a0c-4038-8f55-268a21ec47ea"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:26:06 crc kubenswrapper[4650]: I0201 07:26:06.390616 4650 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/19c50de5-6a0c-4038-8f55-268a21ec47ea-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:06 crc kubenswrapper[4650]: I0201 07:26:06.390677 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/19c50de5-6a0c-4038-8f55-268a21ec47ea-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:06 crc kubenswrapper[4650]: I0201 07:26:06.457605 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:26:06 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:26:06 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:26:06 crc kubenswrapper[4650]: healthz check failed Feb 01 07:26:06 crc kubenswrapper[4650]: I0201 07:26:06.457694 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:26:06 crc kubenswrapper[4650]: I0201 07:26:06.636855 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"19c50de5-6a0c-4038-8f55-268a21ec47ea","Type":"ContainerDied","Data":"9c3b3a1cbacd60daf642a98319baa1cc94183be27699bf6995fec71572a468e8"} Feb 01 07:26:06 crc kubenswrapper[4650]: I0201 07:26:06.636906 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c3b3a1cbacd60daf642a98319baa1cc94183be27699bf6995fec71572a468e8" Feb 01 07:26:06 crc kubenswrapper[4650]: I0201 07:26:06.636953 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 01 07:26:06 crc kubenswrapper[4650]: I0201 07:26:06.725558 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-jvgsf"] Feb 01 07:26:07 crc kubenswrapper[4650]: I0201 07:26:07.161500 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:26:07 crc kubenswrapper[4650]: I0201 07:26:07.162298 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:26:07 crc kubenswrapper[4650]: I0201 07:26:07.456436 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:26:07 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:26:07 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:26:07 crc kubenswrapper[4650]: healthz check failed Feb 01 07:26:07 crc kubenswrapper[4650]: I0201 07:26:07.456514 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:26:07 crc kubenswrapper[4650]: I0201 07:26:07.668816 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" event={"ID":"f4593d40-c6e1-42fa-8c18-053ff31304b3","Type":"ContainerStarted","Data":"f3912cb93634af45fb97b52ca8ec30663edc9cce0031f511e35c6066a5643d25"} Feb 01 07:26:08 crc kubenswrapper[4650]: I0201 07:26:08.455701 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:26:08 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:26:08 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:26:08 crc kubenswrapper[4650]: healthz check failed Feb 01 07:26:08 crc kubenswrapper[4650]: I0201 07:26:08.456210 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:26:08 crc kubenswrapper[4650]: I0201 07:26:08.722135 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" event={"ID":"f4593d40-c6e1-42fa-8c18-053ff31304b3","Type":"ContainerStarted","Data":"aec74588824475a0b02993cd6674645c08c7999e668ddf6be699d38637b59bcc"} Feb 01 07:26:09 crc kubenswrapper[4650]: I0201 07:26:09.291803 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5fhkk"] Feb 01 07:26:09 crc kubenswrapper[4650]: I0201 07:26:09.292046 
4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" podUID="6568a223-ba53-4690-9378-08b043d9db27" containerName="controller-manager" containerID="cri-o://cfa32a7a59035255d2bac7b3a4321aa5ebeaffdfe40618d493cedb0900f9536b" gracePeriod=30 Feb 01 07:26:09 crc kubenswrapper[4650]: I0201 07:26:09.330615 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j"] Feb 01 07:26:09 crc kubenswrapper[4650]: I0201 07:26:09.330901 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" podUID="f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" containerName="route-controller-manager" containerID="cri-o://744132a0de141f8ea0241edc43475b7614df3bca2e0d69c7c4a88b7c44a85e70" gracePeriod=30 Feb 01 07:26:09 crc kubenswrapper[4650]: I0201 07:26:09.453584 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:26:09 crc kubenswrapper[4650]: [-]has-synced failed: reason withheld Feb 01 07:26:09 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:26:09 crc kubenswrapper[4650]: healthz check failed Feb 01 07:26:09 crc kubenswrapper[4650]: I0201 07:26:09.453673 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:26:10 crc kubenswrapper[4650]: I0201 07:26:10.454861 4650 patch_prober.go:28] interesting pod/router-default-5444994796-zkcsv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 01 07:26:10 crc kubenswrapper[4650]: [+]has-synced ok Feb 01 07:26:10 crc kubenswrapper[4650]: [+]process-running ok Feb 01 07:26:10 crc kubenswrapper[4650]: healthz check failed Feb 01 07:26:10 crc kubenswrapper[4650]: I0201 07:26:10.454925 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zkcsv" podUID="0d352c71-f363-4f44-abba-d535c50f6497" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 01 07:26:11 crc kubenswrapper[4650]: I0201 07:26:11.168558 4650 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-5rj7j container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Feb 01 07:26:11 crc kubenswrapper[4650]: I0201 07:26:11.168632 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" podUID="f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Feb 01 07:26:11 crc kubenswrapper[4650]: I0201 07:26:11.454900 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:26:11 crc kubenswrapper[4650]: I0201 07:26:11.457514 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-zkcsv" Feb 01 07:26:12 crc kubenswrapper[4650]: I0201 07:26:12.091386 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-xfg9f" Feb 01 07:26:12 crc kubenswrapper[4650]: I0201 07:26:12.321103 4650 patch_prober.go:28] interesting pod/console-f9d7485db-snf8v container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Feb 01 07:26:12 crc kubenswrapper[4650]: I0201 07:26:12.321158 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-snf8v" podUID="2298718f-d9f4-4714-acbb-01739d0c7b62" containerName="console" probeResult="failure" output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" Feb 01 07:26:12 crc kubenswrapper[4650]: I0201 07:26:12.447454 4650 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-5fhkk container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 01 07:26:12 crc kubenswrapper[4650]: I0201 07:26:12.453480 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" podUID="6568a223-ba53-4690-9378-08b043d9db27" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:26:15 crc kubenswrapper[4650]: I0201 07:26:15.907206 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:26:21 crc kubenswrapper[4650]: I0201 07:26:21.168391 4650 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-5rj7j container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Feb 01 07:26:21 crc kubenswrapper[4650]: I0201 07:26:21.169512 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" podUID="f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Feb 01 07:26:21 crc kubenswrapper[4650]: I0201 07:26:21.442217 4650 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-5fhkk container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Feb 01 07:26:21 crc kubenswrapper[4650]: I0201 07:26:21.442287 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" podUID="6568a223-ba53-4690-9378-08b043d9db27" containerName="controller-manager" 
probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Feb 01 07:26:21 crc kubenswrapper[4650]: I0201 07:26:21.870929 4650 generic.go:334] "Generic (PLEG): container finished" podID="6568a223-ba53-4690-9378-08b043d9db27" containerID="cfa32a7a59035255d2bac7b3a4321aa5ebeaffdfe40618d493cedb0900f9536b" exitCode=0 Feb 01 07:26:21 crc kubenswrapper[4650]: I0201 07:26:21.871050 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" event={"ID":"6568a223-ba53-4690-9378-08b043d9db27","Type":"ContainerDied","Data":"cfa32a7a59035255d2bac7b3a4321aa5ebeaffdfe40618d493cedb0900f9536b"} Feb 01 07:26:21 crc kubenswrapper[4650]: I0201 07:26:21.880602 4650 generic.go:334] "Generic (PLEG): container finished" podID="f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" containerID="744132a0de141f8ea0241edc43475b7614df3bca2e0d69c7c4a88b7c44a85e70" exitCode=0 Feb 01 07:26:21 crc kubenswrapper[4650]: I0201 07:26:21.880667 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" event={"ID":"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd","Type":"ContainerDied","Data":"744132a0de141f8ea0241edc43475b7614df3bca2e0d69c7c4a88b7c44a85e70"} Feb 01 07:26:22 crc kubenswrapper[4650]: I0201 07:26:22.326182 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:26:22 crc kubenswrapper[4650]: I0201 07:26:22.329837 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:26:23 crc kubenswrapper[4650]: I0201 07:26:23.443244 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-b7kgx" Feb 01 07:26:25 crc kubenswrapper[4650]: I0201 07:26:25.878514 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:26:25 crc kubenswrapper[4650]: I0201 07:26:25.975753 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" Feb 01 07:26:25 crc kubenswrapper[4650]: I0201 07:26:25.998481 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j" event={"ID":"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd","Type":"ContainerDied","Data":"6fbd678fd6ea677af7d2e774dfa29bcfe6d2c9fe4a233dfb10821cf3064f60c5"} Feb 01 07:26:25 crc kubenswrapper[4650]: I0201 07:26:25.999007 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n"] Feb 01 07:26:25 crc kubenswrapper[4650]: I0201 07:26:25.999001 4650 scope.go:117] "RemoveContainer" containerID="744132a0de141f8ea0241edc43475b7614df3bca2e0d69c7c4a88b7c44a85e70" Feb 01 07:26:25 crc kubenswrapper[4650]: E0201 07:26:25.999311 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19c50de5-6a0c-4038-8f55-268a21ec47ea" containerName="pruner" Feb 01 07:26:25 crc kubenswrapper[4650]: I0201 07:26:25.999328 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="19c50de5-6a0c-4038-8f55-268a21ec47ea" containerName="pruner" Feb 01 07:26:25 crc kubenswrapper[4650]: E0201 07:26:25.999343 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" containerName="route-controller-manager" Feb 01 07:26:25 crc kubenswrapper[4650]: I0201 07:26:25.999351 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" containerName="route-controller-manager" Feb 01 07:26:25 crc kubenswrapper[4650]: E0201 07:26:25.999361 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="725de557-211c-45a1-95c6-a694f835e02f" containerName="pruner" Feb 01 07:26:25 crc kubenswrapper[4650]: I0201 07:26:25.999368 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="725de557-211c-45a1-95c6-a694f835e02f" containerName="pruner" Feb 01 07:26:25 crc kubenswrapper[4650]: I0201 07:26:25.999562 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="19c50de5-6a0c-4038-8f55-268a21ec47ea" containerName="pruner" Feb 01 07:26:25 crc kubenswrapper[4650]: I0201 07:26:25.999572 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="725de557-211c-45a1-95c6-a694f835e02f" containerName="pruner" Feb 01 07:26:25 crc kubenswrapper[4650]: I0201 07:26:25.999583 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" containerName="route-controller-manager" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.000230 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.001323 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n"] Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.056638 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-config\") pod \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.056867 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-client-ca\") pod \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.056899 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbcgl\" (UniqueName: \"kubernetes.io/projected/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-kube-api-access-pbcgl\") pod \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.056937 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-serving-cert\") pod \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\" (UID: \"f7e7bdc8-3445-4a2f-8ada-1c2223086fcd\") " Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.058440 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-config" (OuterVolumeSpecName: "config") pod "f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" (UID: "f7e7bdc8-3445-4a2f-8ada-1c2223086fcd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.058955 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-client-ca" (OuterVolumeSpecName: "client-ca") pod "f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" (UID: "f7e7bdc8-3445-4a2f-8ada-1c2223086fcd"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.067729 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" (UID: "f7e7bdc8-3445-4a2f-8ada-1c2223086fcd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.092853 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-kube-api-access-pbcgl" (OuterVolumeSpecName: "kube-api-access-pbcgl") pod "f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" (UID: "f7e7bdc8-3445-4a2f-8ada-1c2223086fcd"). InnerVolumeSpecName "kube-api-access-pbcgl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.159154 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-serving-cert\") pod \"route-controller-manager-7bd8785496-ccl6n\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.159231 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-config\") pod \"route-controller-manager-7bd8785496-ccl6n\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.159284 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtdhq\" (UniqueName: \"kubernetes.io/projected/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-kube-api-access-jtdhq\") pod \"route-controller-manager-7bd8785496-ccl6n\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.159329 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-client-ca\") pod \"route-controller-manager-7bd8785496-ccl6n\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.159369 4650 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-client-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.159381 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbcgl\" (UniqueName: \"kubernetes.io/projected/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-kube-api-access-pbcgl\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.159389 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.159399 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.272913 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtdhq\" (UniqueName: \"kubernetes.io/projected/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-kube-api-access-jtdhq\") pod \"route-controller-manager-7bd8785496-ccl6n\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.273406 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-client-ca\") pod \"route-controller-manager-7bd8785496-ccl6n\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.273554 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-serving-cert\") pod \"route-controller-manager-7bd8785496-ccl6n\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.273718 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-config\") pod \"route-controller-manager-7bd8785496-ccl6n\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.275702 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-config\") pod \"route-controller-manager-7bd8785496-ccl6n\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.278281 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-client-ca\") pod \"route-controller-manager-7bd8785496-ccl6n\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.279269 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-serving-cert\") pod \"route-controller-manager-7bd8785496-ccl6n\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.294894 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtdhq\" (UniqueName: \"kubernetes.io/projected/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-kube-api-access-jtdhq\") pod \"route-controller-manager-7bd8785496-ccl6n\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.323475 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j"] Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.326342 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-5rj7j"] Feb 01 07:26:26 crc kubenswrapper[4650]: I0201 07:26:26.328199 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:27 crc kubenswrapper[4650]: I0201 07:26:27.973942 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7e7bdc8-3445-4a2f-8ada-1c2223086fcd" path="/var/lib/kubelet/pods/f7e7bdc8-3445-4a2f-8ada-1c2223086fcd/volumes" Feb 01 07:26:29 crc kubenswrapper[4650]: I0201 07:26:29.403402 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n"] Feb 01 07:26:29 crc kubenswrapper[4650]: I0201 07:26:29.790715 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 01 07:26:32 crc kubenswrapper[4650]: I0201 07:26:32.442395 4650 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-5fhkk container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 01 07:26:32 crc kubenswrapper[4650]: I0201 07:26:32.443053 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" podUID="6568a223-ba53-4690-9378-08b043d9db27" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 01 07:26:35 crc kubenswrapper[4650]: I0201 07:26:35.573534 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 01 07:26:35 crc kubenswrapper[4650]: I0201 07:26:35.575342 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 01 07:26:35 crc kubenswrapper[4650]: I0201 07:26:35.575664 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 01 07:26:35 crc kubenswrapper[4650]: I0201 07:26:35.577525 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Feb 01 07:26:35 crc kubenswrapper[4650]: I0201 07:26:35.578679 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Feb 01 07:26:35 crc kubenswrapper[4650]: I0201 07:26:35.753196 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/156438bd-994b-4c20-929e-19a2ff7a7be1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"156438bd-994b-4c20-929e-19a2ff7a7be1\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 01 07:26:35 crc kubenswrapper[4650]: I0201 07:26:35.753325 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/156438bd-994b-4c20-929e-19a2ff7a7be1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"156438bd-994b-4c20-929e-19a2ff7a7be1\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 01 07:26:35 crc kubenswrapper[4650]: I0201 07:26:35.854989 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/156438bd-994b-4c20-929e-19a2ff7a7be1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"156438bd-994b-4c20-929e-19a2ff7a7be1\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 01 07:26:35 crc kubenswrapper[4650]: I0201 07:26:35.855078 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/156438bd-994b-4c20-929e-19a2ff7a7be1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"156438bd-994b-4c20-929e-19a2ff7a7be1\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 01 07:26:35 crc kubenswrapper[4650]: I0201 07:26:35.855155 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/156438bd-994b-4c20-929e-19a2ff7a7be1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"156438bd-994b-4c20-929e-19a2ff7a7be1\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 01 07:26:35 crc kubenswrapper[4650]: I0201 07:26:35.879666 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/156438bd-994b-4c20-929e-19a2ff7a7be1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"156438bd-994b-4c20-929e-19a2ff7a7be1\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 01 07:26:35 crc kubenswrapper[4650]: I0201 07:26:35.902691 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 01 07:26:37 crc kubenswrapper[4650]: I0201 07:26:37.161912 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:26:37 crc kubenswrapper[4650]: I0201 07:26:37.163407 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:26:37 crc kubenswrapper[4650]: I0201 07:26:37.532474 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-r65td"] Feb 01 07:26:38 crc kubenswrapper[4650]: I0201 07:26:38.859592 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:26:38 crc kubenswrapper[4650]: I0201 07:26:38.896711 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt"] Feb 01 07:26:38 crc kubenswrapper[4650]: E0201 07:26:38.896946 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6568a223-ba53-4690-9378-08b043d9db27" containerName="controller-manager" Feb 01 07:26:38 crc kubenswrapper[4650]: I0201 07:26:38.896963 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="6568a223-ba53-4690-9378-08b043d9db27" containerName="controller-manager" Feb 01 07:26:38 crc kubenswrapper[4650]: I0201 07:26:38.897088 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="6568a223-ba53-4690-9378-08b043d9db27" containerName="controller-manager" Feb 01 07:26:38 crc kubenswrapper[4650]: I0201 07:26:38.897489 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:38 crc kubenswrapper[4650]: I0201 07:26:38.926717 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt"] Feb 01 07:26:38 crc kubenswrapper[4650]: E0201 07:26:38.953204 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Feb 01 07:26:38 crc kubenswrapper[4650]: E0201 07:26:38.953464 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kn9pd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-5s2bx_openshift-marketplace(81e5cda9-5051-4b4d-a3fb-3acb0b780e4c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 01 07:26:38 crc kubenswrapper[4650]: E0201 07:26:38.954832 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-5s2bx" podUID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.027774 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-client-ca\") pod \"6568a223-ba53-4690-9378-08b043d9db27\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.028167 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6568a223-ba53-4690-9378-08b043d9db27-serving-cert\") pod \"6568a223-ba53-4690-9378-08b043d9db27\" (UID: 
\"6568a223-ba53-4690-9378-08b043d9db27\") " Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.028225 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-proxy-ca-bundles\") pod \"6568a223-ba53-4690-9378-08b043d9db27\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.028250 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jj6ds\" (UniqueName: \"kubernetes.io/projected/6568a223-ba53-4690-9378-08b043d9db27-kube-api-access-jj6ds\") pod \"6568a223-ba53-4690-9378-08b043d9db27\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.028287 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-config\") pod \"6568a223-ba53-4690-9378-08b043d9db27\" (UID: \"6568a223-ba53-4690-9378-08b043d9db27\") " Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.028383 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-client-ca\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.028413 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-config\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.028440 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kn2pg\" (UniqueName: \"kubernetes.io/projected/bca26641-9399-4eb6-b2d2-59398c7a8a05-kube-api-access-kn2pg\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.028477 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bca26641-9399-4eb6-b2d2-59398c7a8a05-serving-cert\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.028494 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-proxy-ca-bundles\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.028617 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-client-ca" (OuterVolumeSpecName: "client-ca") pod 
"6568a223-ba53-4690-9378-08b043d9db27" (UID: "6568a223-ba53-4690-9378-08b043d9db27"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.029049 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "6568a223-ba53-4690-9378-08b043d9db27" (UID: "6568a223-ba53-4690-9378-08b043d9db27"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.029495 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-config" (OuterVolumeSpecName: "config") pod "6568a223-ba53-4690-9378-08b043d9db27" (UID: "6568a223-ba53-4690-9378-08b043d9db27"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.036293 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6568a223-ba53-4690-9378-08b043d9db27-kube-api-access-jj6ds" (OuterVolumeSpecName: "kube-api-access-jj6ds") pod "6568a223-ba53-4690-9378-08b043d9db27" (UID: "6568a223-ba53-4690-9378-08b043d9db27"). InnerVolumeSpecName "kube-api-access-jj6ds". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.040374 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6568a223-ba53-4690-9378-08b043d9db27-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6568a223-ba53-4690-9378-08b043d9db27" (UID: "6568a223-ba53-4690-9378-08b043d9db27"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:26:39 crc kubenswrapper[4650]: E0201 07:26:39.077804 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Feb 01 07:26:39 crc kubenswrapper[4650]: E0201 07:26:39.078234 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hr7rj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-9b67j_openshift-marketplace(6c5c24b4-b4cc-4577-8cbd-c299f6fd5561): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 01 07:26:39 crc kubenswrapper[4650]: E0201 07:26:39.079331 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-9b67j" podUID="6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.090736 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.091436 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5fhkk" event={"ID":"6568a223-ba53-4690-9378-08b043d9db27","Type":"ContainerDied","Data":"5001a7699ea470bee3f1ea7bb5af69b7360643ba6f3e8e60c1fbb54beaa06ad1"} Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.091514 4650 scope.go:117] "RemoveContainer" containerID="cfa32a7a59035255d2bac7b3a4321aa5ebeaffdfe40618d493cedb0900f9536b" Feb 01 07:26:39 crc kubenswrapper[4650]: E0201 07:26:39.123725 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-5s2bx" podUID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" Feb 01 07:26:39 crc kubenswrapper[4650]: E0201 07:26:39.123803 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Feb 01 07:26:39 crc kubenswrapper[4650]: E0201 07:26:39.123924 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bxrj2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-xn7fv_openshift-marketplace(029eaf1d-b5ae-4719-b2b1-243c9b8850db): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 01 07:26:39 crc kubenswrapper[4650]: E0201 07:26:39.126128 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" 
pod="openshift-marketplace/redhat-operators-xn7fv" podUID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.129508 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-client-ca\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.129550 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-config\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.129804 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kn2pg\" (UniqueName: \"kubernetes.io/projected/bca26641-9399-4eb6-b2d2-59398c7a8a05-kube-api-access-kn2pg\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.130013 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bca26641-9399-4eb6-b2d2-59398c7a8a05-serving-cert\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.130058 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-proxy-ca-bundles\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.130263 4650 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.130278 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jj6ds\" (UniqueName: \"kubernetes.io/projected/6568a223-ba53-4690-9378-08b043d9db27-kube-api-access-jj6ds\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.130293 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.130304 4650 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6568a223-ba53-4690-9378-08b043d9db27-client-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.130333 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6568a223-ba53-4690-9378-08b043d9db27-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:39 crc 
kubenswrapper[4650]: I0201 07:26:39.130768 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-client-ca\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.131495 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-proxy-ca-bundles\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.133324 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-config\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.147511 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bca26641-9399-4eb6-b2d2-59398c7a8a05-serving-cert\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.161181 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5fhkk"] Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.161914 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kn2pg\" (UniqueName: \"kubernetes.io/projected/bca26641-9399-4eb6-b2d2-59398c7a8a05-kube-api-access-kn2pg\") pod \"controller-manager-655fc9d5bc-wqxtt\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.170377 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5fhkk"] Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.298109 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.333857 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n"] Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.421485 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.653731 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt"] Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.762429 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.763255 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.785901 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.948051 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/285b9d60-7e2e-4df8-811a-ddc59b103d1e-kubelet-dir\") pod \"installer-9-crc\" (UID: \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.948561 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/285b9d60-7e2e-4df8-811a-ddc59b103d1e-var-lock\") pod \"installer-9-crc\" (UID: \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.948748 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/285b9d60-7e2e-4df8-811a-ddc59b103d1e-kube-api-access\") pod \"installer-9-crc\" (UID: \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 01 07:26:39 crc kubenswrapper[4650]: I0201 07:26:39.972162 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6568a223-ba53-4690-9378-08b043d9db27" path="/var/lib/kubelet/pods/6568a223-ba53-4690-9378-08b043d9db27/volumes" Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.050327 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/285b9d60-7e2e-4df8-811a-ddc59b103d1e-var-lock\") pod \"installer-9-crc\" (UID: \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.050443 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/285b9d60-7e2e-4df8-811a-ddc59b103d1e-kube-api-access\") pod \"installer-9-crc\" (UID: \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.050483 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/285b9d60-7e2e-4df8-811a-ddc59b103d1e-kubelet-dir\") pod \"installer-9-crc\" (UID: \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.050520 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/285b9d60-7e2e-4df8-811a-ddc59b103d1e-var-lock\") pod \"installer-9-crc\" (UID: \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.050583 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/285b9d60-7e2e-4df8-811a-ddc59b103d1e-kubelet-dir\") pod \"installer-9-crc\" (UID: \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 
07:26:40.075665 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/285b9d60-7e2e-4df8-811a-ddc59b103d1e-kube-api-access\") pod \"installer-9-crc\" (UID: \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.097668 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jvgsf" event={"ID":"f4593d40-c6e1-42fa-8c18-053ff31304b3","Type":"ContainerStarted","Data":"2688a22e20c14f21099ba48cdff209bd0047e535a3e29113dc1fc37ffe0b46a3"} Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.101950 4650 generic.go:334] "Generic (PLEG): container finished" podID="51dbc0bf-4be1-4dcc-b406-262067016c90" containerID="59e92e06ea68ad63db4c12ada7ac7f40b134b96eb5ecbfac6d0780314bb0c483" exitCode=0 Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.102136 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zlgfx" event={"ID":"51dbc0bf-4be1-4dcc-b406-262067016c90","Type":"ContainerDied","Data":"59e92e06ea68ad63db4c12ada7ac7f40b134b96eb5ecbfac6d0780314bb0c483"} Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.104105 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" event={"ID":"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810","Type":"ContainerStarted","Data":"a92e0be476db84c1b32b3816c1127142f6bc9cdc5bade0dc183d5cf929286544"} Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.104198 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" event={"ID":"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810","Type":"ContainerStarted","Data":"6ce60ad9a134addf34b34eb52bcb55dd7c366848d161d8e09884d74cafe39b5b"} Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.112594 4650 generic.go:334] "Generic (PLEG): container finished" podID="3ed3c67d-2427-4ee2-950c-0f705023db71" containerID="e9bd678f3eceb93af7c4d793ce2a636a81dc2032cb8c926d1a7da2774c0556f4" exitCode=0 Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.112704 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d4v2s" event={"ID":"3ed3c67d-2427-4ee2-950c-0f705023db71","Type":"ContainerDied","Data":"e9bd678f3eceb93af7c4d793ce2a636a81dc2032cb8c926d1a7da2774c0556f4"} Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.114819 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" event={"ID":"bca26641-9399-4eb6-b2d2-59398c7a8a05","Type":"ContainerStarted","Data":"ae11e9d80109da830f9d7b8f3e51c77ce1860c5a6c1e97785264b5aa674fc806"} Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.119203 4650 generic.go:334] "Generic (PLEG): container finished" podID="15308cf7-fed5-4bf2-84e9-ff7ea341303f" containerID="ca866d094eba9c01adb6d57a410c0c726b23ce9983e54c49212cf516b4d23c79" exitCode=0 Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.119264 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kwqn7" event={"ID":"15308cf7-fed5-4bf2-84e9-ff7ea341303f","Type":"ContainerDied","Data":"ca866d094eba9c01adb6d57a410c0c726b23ce9983e54c49212cf516b4d23c79"} Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.140777 4650 generic.go:334] "Generic (PLEG): container finished" 
podID="86213c9a-3227-4329-8a45-83e5b550f4a2" containerID="2cb50f87619fa1b00f2987dab92e685793b976d832e5f4872d3163d63e64a7f3" exitCode=0 Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.140866 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2f7qv" event={"ID":"86213c9a-3227-4329-8a45-83e5b550f4a2","Type":"ContainerDied","Data":"2cb50f87619fa1b00f2987dab92e685793b976d832e5f4872d3163d63e64a7f3"} Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.152429 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-jvgsf" podStartSLOduration=178.152399634 podStartE2EDuration="2m58.152399634s" podCreationTimestamp="2026-02-01 07:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:26:40.118325273 +0000 UTC m=+198.841423528" watchObservedRunningTime="2026-02-01 07:26:40.152399634 +0000 UTC m=+198.875497879" Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.159361 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"156438bd-994b-4c20-929e-19a2ff7a7be1","Type":"ContainerStarted","Data":"25ce60bb297fbbde2c9a5775b7c9b3e076010b4d08d7fea490151b37f402dc8d"} Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.164162 4650 generic.go:334] "Generic (PLEG): container finished" podID="a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" containerID="3407debaed3b9e454ab05df9f9cc1de75ba19a0080417e33b4c195e8edbdbc35" exitCode=0 Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.165281 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sfl8l" event={"ID":"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0","Type":"ContainerDied","Data":"3407debaed3b9e454ab05df9f9cc1de75ba19a0080417e33b4c195e8edbdbc35"} Feb 01 07:26:40 crc kubenswrapper[4650]: E0201 07:26:40.167677 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-xn7fv" podUID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.288244 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 01 07:26:40 crc kubenswrapper[4650]: I0201 07:26:40.567691 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 01 07:26:41 crc kubenswrapper[4650]: I0201 07:26:41.174068 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"156438bd-994b-4c20-929e-19a2ff7a7be1","Type":"ContainerStarted","Data":"ea94db4c8c569d7ffdcb4721ad6f5894b6089a586bb458f1030b02eb84348d6a"} Feb 01 07:26:41 crc kubenswrapper[4650]: I0201 07:26:41.176647 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"285b9d60-7e2e-4df8-811a-ddc59b103d1e","Type":"ContainerStarted","Data":"8ca9623a850773c08b9b9085bdd633aad2461be67e9ef916b4ec987e0f3e750a"} Feb 01 07:26:41 crc kubenswrapper[4650]: I0201 07:26:41.178530 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" event={"ID":"bca26641-9399-4eb6-b2d2-59398c7a8a05","Type":"ContainerStarted","Data":"1d9fc3fd5e6969f1472cdf1f216bf5817f6c3d3f9daa252c21916d7ffa409216"} Feb 01 07:26:41 crc kubenswrapper[4650]: I0201 07:26:41.178992 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:41 crc kubenswrapper[4650]: I0201 07:26:41.179209 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" podUID="61b6a7ac-a6d6-4bcc-8ba5-2d8339577810" containerName="route-controller-manager" containerID="cri-o://a92e0be476db84c1b32b3816c1127142f6bc9cdc5bade0dc183d5cf929286544" gracePeriod=30 Feb 01 07:26:41 crc kubenswrapper[4650]: I0201 07:26:41.198630 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:26:41 crc kubenswrapper[4650]: I0201 07:26:41.201730 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=6.201706748 podStartE2EDuration="6.201706748s" podCreationTimestamp="2026-02-01 07:26:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:26:41.197891987 +0000 UTC m=+199.920990242" watchObservedRunningTime="2026-02-01 07:26:41.201706748 +0000 UTC m=+199.924804993" Feb 01 07:26:41 crc kubenswrapper[4650]: I0201 07:26:41.254657 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" podStartSLOduration=12.254626726 podStartE2EDuration="12.254626726s" podCreationTimestamp="2026-02-01 07:26:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:26:41.229253388 +0000 UTC m=+199.952351633" watchObservedRunningTime="2026-02-01 07:26:41.254626726 +0000 UTC m=+199.977724971" Feb 01 07:26:41 crc kubenswrapper[4650]: I0201 07:26:41.257584 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" podStartSLOduration=32.257577255 podStartE2EDuration="32.257577255s" podCreationTimestamp="2026-02-01 07:26:09 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:26:41.252597011 +0000 UTC m=+199.975695266" watchObservedRunningTime="2026-02-01 07:26:41.257577255 +0000 UTC m=+199.980675500" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.191201 4650 generic.go:334] "Generic (PLEG): container finished" podID="61b6a7ac-a6d6-4bcc-8ba5-2d8339577810" containerID="a92e0be476db84c1b32b3816c1127142f6bc9cdc5bade0dc183d5cf929286544" exitCode=0 Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.192353 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" event={"ID":"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810","Type":"ContainerDied","Data":"a92e0be476db84c1b32b3816c1127142f6bc9cdc5bade0dc183d5cf929286544"} Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.195869 4650 generic.go:334] "Generic (PLEG): container finished" podID="156438bd-994b-4c20-929e-19a2ff7a7be1" containerID="ea94db4c8c569d7ffdcb4721ad6f5894b6089a586bb458f1030b02eb84348d6a" exitCode=0 Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.195952 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"156438bd-994b-4c20-929e-19a2ff7a7be1","Type":"ContainerDied","Data":"ea94db4c8c569d7ffdcb4721ad6f5894b6089a586bb458f1030b02eb84348d6a"} Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.200856 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"285b9d60-7e2e-4df8-811a-ddc59b103d1e","Type":"ContainerStarted","Data":"b96d3b2242b4b243eb8c9cc6cd964e62def33fffb2250364fbc55c336dcb9ebf"} Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.253661 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=3.25363233 podStartE2EDuration="3.25363233s" podCreationTimestamp="2026-02-01 07:26:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:26:42.245492369 +0000 UTC m=+200.968590614" watchObservedRunningTime="2026-02-01 07:26:42.25363233 +0000 UTC m=+200.976730575" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.283871 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.319426 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp"] Feb 01 07:26:42 crc kubenswrapper[4650]: E0201 07:26:42.319854 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61b6a7ac-a6d6-4bcc-8ba5-2d8339577810" containerName="route-controller-manager" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.319878 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="61b6a7ac-a6d6-4bcc-8ba5-2d8339577810" containerName="route-controller-manager" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.320045 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="61b6a7ac-a6d6-4bcc-8ba5-2d8339577810" containerName="route-controller-manager" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.321142 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.345491 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp"] Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.385418 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtdhq\" (UniqueName: \"kubernetes.io/projected/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-kube-api-access-jtdhq\") pod \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.385531 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-client-ca\") pod \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.385601 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-config\") pod \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.385682 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-serving-cert\") pod \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\" (UID: \"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810\") " Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.385960 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99f68f70-14ee-4a44-9629-f32a18409edc-config\") pod \"route-controller-manager-5c8f774cf7-m4sbp\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.385992 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/99f68f70-14ee-4a44-9629-f32a18409edc-client-ca\") pod \"route-controller-manager-5c8f774cf7-m4sbp\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.386055 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rfrf\" (UniqueName: \"kubernetes.io/projected/99f68f70-14ee-4a44-9629-f32a18409edc-kube-api-access-6rfrf\") pod \"route-controller-manager-5c8f774cf7-m4sbp\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.386095 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/99f68f70-14ee-4a44-9629-f32a18409edc-serving-cert\") pod \"route-controller-manager-5c8f774cf7-m4sbp\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:42 crc 
kubenswrapper[4650]: I0201 07:26:42.386710 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-client-ca" (OuterVolumeSpecName: "client-ca") pod "61b6a7ac-a6d6-4bcc-8ba5-2d8339577810" (UID: "61b6a7ac-a6d6-4bcc-8ba5-2d8339577810"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.386903 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-config" (OuterVolumeSpecName: "config") pod "61b6a7ac-a6d6-4bcc-8ba5-2d8339577810" (UID: "61b6a7ac-a6d6-4bcc-8ba5-2d8339577810"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.392311 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-kube-api-access-jtdhq" (OuterVolumeSpecName: "kube-api-access-jtdhq") pod "61b6a7ac-a6d6-4bcc-8ba5-2d8339577810" (UID: "61b6a7ac-a6d6-4bcc-8ba5-2d8339577810"). InnerVolumeSpecName "kube-api-access-jtdhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.395443 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "61b6a7ac-a6d6-4bcc-8ba5-2d8339577810" (UID: "61b6a7ac-a6d6-4bcc-8ba5-2d8339577810"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.487173 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99f68f70-14ee-4a44-9629-f32a18409edc-config\") pod \"route-controller-manager-5c8f774cf7-m4sbp\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.487216 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/99f68f70-14ee-4a44-9629-f32a18409edc-client-ca\") pod \"route-controller-manager-5c8f774cf7-m4sbp\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.487259 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rfrf\" (UniqueName: \"kubernetes.io/projected/99f68f70-14ee-4a44-9629-f32a18409edc-kube-api-access-6rfrf\") pod \"route-controller-manager-5c8f774cf7-m4sbp\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.487303 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/99f68f70-14ee-4a44-9629-f32a18409edc-serving-cert\") pod \"route-controller-manager-5c8f774cf7-m4sbp\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.487380 4650 reconciler_common.go:293] 
"Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-client-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.487394 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.487404 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.487414 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtdhq\" (UniqueName: \"kubernetes.io/projected/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810-kube-api-access-jtdhq\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.488785 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99f68f70-14ee-4a44-9629-f32a18409edc-config\") pod \"route-controller-manager-5c8f774cf7-m4sbp\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.488858 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/99f68f70-14ee-4a44-9629-f32a18409edc-client-ca\") pod \"route-controller-manager-5c8f774cf7-m4sbp\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.493949 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/99f68f70-14ee-4a44-9629-f32a18409edc-serving-cert\") pod \"route-controller-manager-5c8f774cf7-m4sbp\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.507585 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rfrf\" (UniqueName: \"kubernetes.io/projected/99f68f70-14ee-4a44-9629-f32a18409edc-kube-api-access-6rfrf\") pod \"route-controller-manager-5c8f774cf7-m4sbp\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:42 crc kubenswrapper[4650]: I0201 07:26:42.650959 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.222890 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.223309 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n" event={"ID":"61b6a7ac-a6d6-4bcc-8ba5-2d8339577810","Type":"ContainerDied","Data":"6ce60ad9a134addf34b34eb52bcb55dd7c366848d161d8e09884d74cafe39b5b"} Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.223427 4650 scope.go:117] "RemoveContainer" containerID="a92e0be476db84c1b32b3816c1127142f6bc9cdc5bade0dc183d5cf929286544" Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.254337 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp"] Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.283364 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n"] Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.310142 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7bd8785496-ccl6n"] Feb 01 07:26:43 crc kubenswrapper[4650]: W0201 07:26:43.314246 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod99f68f70_14ee_4a44_9629_f32a18409edc.slice/crio-30b08df0a4fa53a4dcb4c4aab12c0a1710cbaeb3b73fe1adadf56f1445052127 WatchSource:0}: Error finding container 30b08df0a4fa53a4dcb4c4aab12c0a1710cbaeb3b73fe1adadf56f1445052127: Status 404 returned error can't find the container with id 30b08df0a4fa53a4dcb4c4aab12c0a1710cbaeb3b73fe1adadf56f1445052127 Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.534792 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.608063 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/156438bd-994b-4c20-929e-19a2ff7a7be1-kube-api-access\") pod \"156438bd-994b-4c20-929e-19a2ff7a7be1\" (UID: \"156438bd-994b-4c20-929e-19a2ff7a7be1\") " Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.608542 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/156438bd-994b-4c20-929e-19a2ff7a7be1-kubelet-dir\") pod \"156438bd-994b-4c20-929e-19a2ff7a7be1\" (UID: \"156438bd-994b-4c20-929e-19a2ff7a7be1\") " Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.608795 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/156438bd-994b-4c20-929e-19a2ff7a7be1-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "156438bd-994b-4c20-929e-19a2ff7a7be1" (UID: "156438bd-994b-4c20-929e-19a2ff7a7be1"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.609063 4650 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/156438bd-994b-4c20-929e-19a2ff7a7be1-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.615330 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/156438bd-994b-4c20-929e-19a2ff7a7be1-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "156438bd-994b-4c20-929e-19a2ff7a7be1" (UID: "156438bd-994b-4c20-929e-19a2ff7a7be1"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.710300 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/156438bd-994b-4c20-929e-19a2ff7a7be1-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:43 crc kubenswrapper[4650]: I0201 07:26:43.978594 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61b6a7ac-a6d6-4bcc-8ba5-2d8339577810" path="/var/lib/kubelet/pods/61b6a7ac-a6d6-4bcc-8ba5-2d8339577810/volumes" Feb 01 07:26:44 crc kubenswrapper[4650]: I0201 07:26:44.232341 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" event={"ID":"99f68f70-14ee-4a44-9629-f32a18409edc","Type":"ContainerStarted","Data":"30b08df0a4fa53a4dcb4c4aab12c0a1710cbaeb3b73fe1adadf56f1445052127"} Feb 01 07:26:44 crc kubenswrapper[4650]: I0201 07:26:44.237308 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2f7qv" event={"ID":"86213c9a-3227-4329-8a45-83e5b550f4a2","Type":"ContainerStarted","Data":"b978277320c9a47bb517019fadbf9cd3760a74ee3713f34720c5209fdc918251"} Feb 01 07:26:44 crc kubenswrapper[4650]: I0201 07:26:44.246749 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"156438bd-994b-4c20-929e-19a2ff7a7be1","Type":"ContainerDied","Data":"25ce60bb297fbbde2c9a5775b7c9b3e076010b4d08d7fea490151b37f402dc8d"} Feb 01 07:26:44 crc kubenswrapper[4650]: I0201 07:26:44.246804 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25ce60bb297fbbde2c9a5775b7c9b3e076010b4d08d7fea490151b37f402dc8d" Feb 01 07:26:44 crc kubenswrapper[4650]: I0201 07:26:44.246820 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 01 07:26:44 crc kubenswrapper[4650]: I0201 07:26:44.258630 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2f7qv" podStartSLOduration=4.184330385 podStartE2EDuration="51.258609115s" podCreationTimestamp="2026-02-01 07:25:53 +0000 UTC" firstStartedPulling="2026-02-01 07:25:55.96417657 +0000 UTC m=+154.687274815" lastFinishedPulling="2026-02-01 07:26:43.03845531 +0000 UTC m=+201.761553545" observedRunningTime="2026-02-01 07:26:44.257604778 +0000 UTC m=+202.980703023" watchObservedRunningTime="2026-02-01 07:26:44.258609115 +0000 UTC m=+202.981707360" Feb 01 07:26:46 crc kubenswrapper[4650]: I0201 07:26:46.260610 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d4v2s" event={"ID":"3ed3c67d-2427-4ee2-950c-0f705023db71","Type":"ContainerStarted","Data":"6110df6e8c8fb565cb77439c8ec1cecf2fe8b9cee70d57f6e40fa43b49c21822"} Feb 01 07:26:46 crc kubenswrapper[4650]: I0201 07:26:46.267676 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kwqn7" event={"ID":"15308cf7-fed5-4bf2-84e9-ff7ea341303f","Type":"ContainerStarted","Data":"b4546924963669cfcf21b3ba2f91c0fa0075fc568e617e30679a7aa593f249c6"} Feb 01 07:26:46 crc kubenswrapper[4650]: I0201 07:26:46.269797 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" event={"ID":"99f68f70-14ee-4a44-9629-f32a18409edc","Type":"ContainerStarted","Data":"adff36b140a94af5ce8823b4c7b6d44ce8edbaebe922d44895ad06018050acb9"} Feb 01 07:26:46 crc kubenswrapper[4650]: I0201 07:26:46.270426 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:46 crc kubenswrapper[4650]: I0201 07:26:46.274392 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zlgfx" event={"ID":"51dbc0bf-4be1-4dcc-b406-262067016c90","Type":"ContainerStarted","Data":"f8c4cef549eb915c29f1600869d8a8b2e80a9b50126880a81df393c51c72dc51"} Feb 01 07:26:46 crc kubenswrapper[4650]: I0201 07:26:46.277160 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sfl8l" event={"ID":"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0","Type":"ContainerStarted","Data":"0ea2c6d0b8c9a44d4da58af04d3fc73f1786fbd62ea417ea20c550041f6bd32b"} Feb 01 07:26:46 crc kubenswrapper[4650]: I0201 07:26:46.283484 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-d4v2s" podStartSLOduration=5.383115048 podStartE2EDuration="55.283471586s" podCreationTimestamp="2026-02-01 07:25:51 +0000 UTC" firstStartedPulling="2026-02-01 07:25:55.970977163 +0000 UTC m=+154.694075408" lastFinishedPulling="2026-02-01 07:26:45.871333701 +0000 UTC m=+204.594431946" observedRunningTime="2026-02-01 07:26:46.281318276 +0000 UTC m=+205.004416531" watchObservedRunningTime="2026-02-01 07:26:46.283471586 +0000 UTC m=+205.006569831" Feb 01 07:26:46 crc kubenswrapper[4650]: I0201 07:26:46.335415 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sfl8l" podStartSLOduration=4.186330828 podStartE2EDuration="53.335393337s" podCreationTimestamp="2026-02-01 07:25:53 +0000 UTC" firstStartedPulling="2026-02-01 
07:25:55.964168469 +0000 UTC m=+154.687266714" lastFinishedPulling="2026-02-01 07:26:45.113230958 +0000 UTC m=+203.836329223" observedRunningTime="2026-02-01 07:26:46.33332893 +0000 UTC m=+205.056427175" watchObservedRunningTime="2026-02-01 07:26:46.335393337 +0000 UTC m=+205.058491582" Feb 01 07:26:46 crc kubenswrapper[4650]: I0201 07:26:46.362892 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" podStartSLOduration=17.362870273 podStartE2EDuration="17.362870273s" podCreationTimestamp="2026-02-01 07:26:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:26:46.361633767 +0000 UTC m=+205.084732022" watchObservedRunningTime="2026-02-01 07:26:46.362870273 +0000 UTC m=+205.085968518" Feb 01 07:26:46 crc kubenswrapper[4650]: I0201 07:26:46.388922 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kwqn7" podStartSLOduration=5.410389745 podStartE2EDuration="55.388894726s" podCreationTimestamp="2026-02-01 07:25:51 +0000 UTC" firstStartedPulling="2026-02-01 07:25:55.899122063 +0000 UTC m=+154.622220308" lastFinishedPulling="2026-02-01 07:26:45.877627044 +0000 UTC m=+204.600725289" observedRunningTime="2026-02-01 07:26:46.387151071 +0000 UTC m=+205.110249326" watchObservedRunningTime="2026-02-01 07:26:46.388894726 +0000 UTC m=+205.111992971" Feb 01 07:26:46 crc kubenswrapper[4650]: I0201 07:26:46.420403 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zlgfx" podStartSLOduration=5.568607795 podStartE2EDuration="55.4203778s" podCreationTimestamp="2026-02-01 07:25:51 +0000 UTC" firstStartedPulling="2026-02-01 07:25:55.964439007 +0000 UTC m=+154.687537252" lastFinishedPulling="2026-02-01 07:26:45.816209012 +0000 UTC m=+204.539307257" observedRunningTime="2026-02-01 07:26:46.418555123 +0000 UTC m=+205.141653388" watchObservedRunningTime="2026-02-01 07:26:46.4203778 +0000 UTC m=+205.143476045" Feb 01 07:26:46 crc kubenswrapper[4650]: I0201 07:26:46.860235 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:26:52 crc kubenswrapper[4650]: I0201 07:26:52.026104 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:26:52 crc kubenswrapper[4650]: I0201 07:26:52.027326 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:26:52 crc kubenswrapper[4650]: I0201 07:26:52.070114 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:26:52 crc kubenswrapper[4650]: I0201 07:26:52.070866 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:26:52 crc kubenswrapper[4650]: I0201 07:26:52.430462 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:26:52 crc kubenswrapper[4650]: I0201 07:26:52.433696 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:26:52 crc 
kubenswrapper[4650]: I0201 07:26:52.476206 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:26:52 crc kubenswrapper[4650]: I0201 07:26:52.728334 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:26:52 crc kubenswrapper[4650]: I0201 07:26:52.728402 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:26:52 crc kubenswrapper[4650]: I0201 07:26:52.780866 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:26:53 crc kubenswrapper[4650]: I0201 07:26:53.358653 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:26:53 crc kubenswrapper[4650]: I0201 07:26:53.487793 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:26:53 crc kubenswrapper[4650]: I0201 07:26:53.918252 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:26:53 crc kubenswrapper[4650]: I0201 07:26:53.918484 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:26:53 crc kubenswrapper[4650]: I0201 07:26:53.972555 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:26:54 crc kubenswrapper[4650]: I0201 07:26:54.153176 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:26:54 crc kubenswrapper[4650]: I0201 07:26:54.153257 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:26:54 crc kubenswrapper[4650]: I0201 07:26:54.192180 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:26:54 crc kubenswrapper[4650]: I0201 07:26:54.368340 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-d4v2s"] Feb 01 07:26:54 crc kubenswrapper[4650]: I0201 07:26:54.370751 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:26:54 crc kubenswrapper[4650]: I0201 07:26:54.379456 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:26:55 crc kubenswrapper[4650]: I0201 07:26:55.331719 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-d4v2s" podUID="3ed3c67d-2427-4ee2-950c-0f705023db71" containerName="registry-server" containerID="cri-o://6110df6e8c8fb565cb77439c8ec1cecf2fe8b9cee70d57f6e40fa43b49c21822" gracePeriod=2 Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.053702 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.126128 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdxfh\" (UniqueName: \"kubernetes.io/projected/3ed3c67d-2427-4ee2-950c-0f705023db71-kube-api-access-cdxfh\") pod \"3ed3c67d-2427-4ee2-950c-0f705023db71\" (UID: \"3ed3c67d-2427-4ee2-950c-0f705023db71\") " Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.126233 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ed3c67d-2427-4ee2-950c-0f705023db71-utilities\") pod \"3ed3c67d-2427-4ee2-950c-0f705023db71\" (UID: \"3ed3c67d-2427-4ee2-950c-0f705023db71\") " Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.126363 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ed3c67d-2427-4ee2-950c-0f705023db71-catalog-content\") pod \"3ed3c67d-2427-4ee2-950c-0f705023db71\" (UID: \"3ed3c67d-2427-4ee2-950c-0f705023db71\") " Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.131578 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ed3c67d-2427-4ee2-950c-0f705023db71-utilities" (OuterVolumeSpecName: "utilities") pod "3ed3c67d-2427-4ee2-950c-0f705023db71" (UID: "3ed3c67d-2427-4ee2-950c-0f705023db71"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.152390 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ed3c67d-2427-4ee2-950c-0f705023db71-kube-api-access-cdxfh" (OuterVolumeSpecName: "kube-api-access-cdxfh") pod "3ed3c67d-2427-4ee2-950c-0f705023db71" (UID: "3ed3c67d-2427-4ee2-950c-0f705023db71"). InnerVolumeSpecName "kube-api-access-cdxfh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.192372 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ed3c67d-2427-4ee2-950c-0f705023db71-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3ed3c67d-2427-4ee2-950c-0f705023db71" (UID: "3ed3c67d-2427-4ee2-950c-0f705023db71"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.227884 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdxfh\" (UniqueName: \"kubernetes.io/projected/3ed3c67d-2427-4ee2-950c-0f705023db71-kube-api-access-cdxfh\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.227927 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ed3c67d-2427-4ee2-950c-0f705023db71-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.227941 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ed3c67d-2427-4ee2-950c-0f705023db71-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.341271 4650 generic.go:334] "Generic (PLEG): container finished" podID="3ed3c67d-2427-4ee2-950c-0f705023db71" containerID="6110df6e8c8fb565cb77439c8ec1cecf2fe8b9cee70d57f6e40fa43b49c21822" exitCode=0 Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.341467 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d4v2s" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.341514 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d4v2s" event={"ID":"3ed3c67d-2427-4ee2-950c-0f705023db71","Type":"ContainerDied","Data":"6110df6e8c8fb565cb77439c8ec1cecf2fe8b9cee70d57f6e40fa43b49c21822"} Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.341945 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d4v2s" event={"ID":"3ed3c67d-2427-4ee2-950c-0f705023db71","Type":"ContainerDied","Data":"06639540c0d90b18b4099f051b74f12a187b4a36bd450bd57a12c74519291daa"} Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.342106 4650 scope.go:117] "RemoveContainer" containerID="6110df6e8c8fb565cb77439c8ec1cecf2fe8b9cee70d57f6e40fa43b49c21822" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.345095 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9b67j" event={"ID":"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561","Type":"ContainerDied","Data":"cb6874608e5fecb1a37bf77e416487b7d1e36be3c7c98f500f4929de12aa8c05"} Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.344712 4650 generic.go:334] "Generic (PLEG): container finished" podID="6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" containerID="cb6874608e5fecb1a37bf77e416487b7d1e36be3c7c98f500f4929de12aa8c05" exitCode=0 Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.350702 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5s2bx" event={"ID":"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c","Type":"ContainerStarted","Data":"601574de42827a76aa92ff0c22aa81229b9326b6a09b90ed20e8a5cc04bf9800"} Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.358167 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn7fv" event={"ID":"029eaf1d-b5ae-4719-b2b1-243c9b8850db","Type":"ContainerStarted","Data":"dfc86c37de840e73c20c87484ad76a08a4cdb6230251cb60e858628a11fc2c75"} Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.386647 4650 scope.go:117] "RemoveContainer" containerID="e9bd678f3eceb93af7c4d793ce2a636a81dc2032cb8c926d1a7da2774c0556f4" Feb 01 
07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.390214 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-d4v2s"] Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.392697 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-d4v2s"] Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.406235 4650 scope.go:117] "RemoveContainer" containerID="e52c1ebeb06526a4b1e42b57a5803f23baeb965561368a6a232299dad8486056" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.430598 4650 scope.go:117] "RemoveContainer" containerID="6110df6e8c8fb565cb77439c8ec1cecf2fe8b9cee70d57f6e40fa43b49c21822" Feb 01 07:26:56 crc kubenswrapper[4650]: E0201 07:26:56.431130 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6110df6e8c8fb565cb77439c8ec1cecf2fe8b9cee70d57f6e40fa43b49c21822\": container with ID starting with 6110df6e8c8fb565cb77439c8ec1cecf2fe8b9cee70d57f6e40fa43b49c21822 not found: ID does not exist" containerID="6110df6e8c8fb565cb77439c8ec1cecf2fe8b9cee70d57f6e40fa43b49c21822" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.431161 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6110df6e8c8fb565cb77439c8ec1cecf2fe8b9cee70d57f6e40fa43b49c21822"} err="failed to get container status \"6110df6e8c8fb565cb77439c8ec1cecf2fe8b9cee70d57f6e40fa43b49c21822\": rpc error: code = NotFound desc = could not find container \"6110df6e8c8fb565cb77439c8ec1cecf2fe8b9cee70d57f6e40fa43b49c21822\": container with ID starting with 6110df6e8c8fb565cb77439c8ec1cecf2fe8b9cee70d57f6e40fa43b49c21822 not found: ID does not exist" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.431199 4650 scope.go:117] "RemoveContainer" containerID="e9bd678f3eceb93af7c4d793ce2a636a81dc2032cb8c926d1a7da2774c0556f4" Feb 01 07:26:56 crc kubenswrapper[4650]: E0201 07:26:56.431458 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9bd678f3eceb93af7c4d793ce2a636a81dc2032cb8c926d1a7da2774c0556f4\": container with ID starting with e9bd678f3eceb93af7c4d793ce2a636a81dc2032cb8c926d1a7da2774c0556f4 not found: ID does not exist" containerID="e9bd678f3eceb93af7c4d793ce2a636a81dc2032cb8c926d1a7da2774c0556f4" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.431478 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9bd678f3eceb93af7c4d793ce2a636a81dc2032cb8c926d1a7da2774c0556f4"} err="failed to get container status \"e9bd678f3eceb93af7c4d793ce2a636a81dc2032cb8c926d1a7da2774c0556f4\": rpc error: code = NotFound desc = could not find container \"e9bd678f3eceb93af7c4d793ce2a636a81dc2032cb8c926d1a7da2774c0556f4\": container with ID starting with e9bd678f3eceb93af7c4d793ce2a636a81dc2032cb8c926d1a7da2774c0556f4 not found: ID does not exist" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.431501 4650 scope.go:117] "RemoveContainer" containerID="e52c1ebeb06526a4b1e42b57a5803f23baeb965561368a6a232299dad8486056" Feb 01 07:26:56 crc kubenswrapper[4650]: E0201 07:26:56.431739 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e52c1ebeb06526a4b1e42b57a5803f23baeb965561368a6a232299dad8486056\": container with ID starting with e52c1ebeb06526a4b1e42b57a5803f23baeb965561368a6a232299dad8486056 not found: ID does not 
exist" containerID="e52c1ebeb06526a4b1e42b57a5803f23baeb965561368a6a232299dad8486056" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.431768 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e52c1ebeb06526a4b1e42b57a5803f23baeb965561368a6a232299dad8486056"} err="failed to get container status \"e52c1ebeb06526a4b1e42b57a5803f23baeb965561368a6a232299dad8486056\": rpc error: code = NotFound desc = could not find container \"e52c1ebeb06526a4b1e42b57a5803f23baeb965561368a6a232299dad8486056\": container with ID starting with e52c1ebeb06526a4b1e42b57a5803f23baeb965561368a6a232299dad8486056 not found: ID does not exist" Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.758488 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2f7qv"] Feb 01 07:26:56 crc kubenswrapper[4650]: I0201 07:26:56.758762 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2f7qv" podUID="86213c9a-3227-4329-8a45-83e5b550f4a2" containerName="registry-server" containerID="cri-o://b978277320c9a47bb517019fadbf9cd3760a74ee3713f34720c5209fdc918251" gracePeriod=2 Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.278933 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.350146 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86213c9a-3227-4329-8a45-83e5b550f4a2-utilities\") pod \"86213c9a-3227-4329-8a45-83e5b550f4a2\" (UID: \"86213c9a-3227-4329-8a45-83e5b550f4a2\") " Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.350237 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kchjx\" (UniqueName: \"kubernetes.io/projected/86213c9a-3227-4329-8a45-83e5b550f4a2-kube-api-access-kchjx\") pod \"86213c9a-3227-4329-8a45-83e5b550f4a2\" (UID: \"86213c9a-3227-4329-8a45-83e5b550f4a2\") " Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.350402 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86213c9a-3227-4329-8a45-83e5b550f4a2-catalog-content\") pod \"86213c9a-3227-4329-8a45-83e5b550f4a2\" (UID: \"86213c9a-3227-4329-8a45-83e5b550f4a2\") " Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.351062 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86213c9a-3227-4329-8a45-83e5b550f4a2-utilities" (OuterVolumeSpecName: "utilities") pod "86213c9a-3227-4329-8a45-83e5b550f4a2" (UID: "86213c9a-3227-4329-8a45-83e5b550f4a2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.356405 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86213c9a-3227-4329-8a45-83e5b550f4a2-kube-api-access-kchjx" (OuterVolumeSpecName: "kube-api-access-kchjx") pod "86213c9a-3227-4329-8a45-83e5b550f4a2" (UID: "86213c9a-3227-4329-8a45-83e5b550f4a2"). InnerVolumeSpecName "kube-api-access-kchjx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.365402 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9b67j" event={"ID":"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561","Type":"ContainerStarted","Data":"6b51d39d1974553e599e72aa4668fc43d5571c07dcb1053bec38ecc85ca9260c"} Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.368088 4650 generic.go:334] "Generic (PLEG): container finished" podID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" containerID="601574de42827a76aa92ff0c22aa81229b9326b6a09b90ed20e8a5cc04bf9800" exitCode=0 Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.368175 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5s2bx" event={"ID":"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c","Type":"ContainerDied","Data":"601574de42827a76aa92ff0c22aa81229b9326b6a09b90ed20e8a5cc04bf9800"} Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.375294 4650 generic.go:334] "Generic (PLEG): container finished" podID="86213c9a-3227-4329-8a45-83e5b550f4a2" containerID="b978277320c9a47bb517019fadbf9cd3760a74ee3713f34720c5209fdc918251" exitCode=0 Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.375379 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2f7qv" event={"ID":"86213c9a-3227-4329-8a45-83e5b550f4a2","Type":"ContainerDied","Data":"b978277320c9a47bb517019fadbf9cd3760a74ee3713f34720c5209fdc918251"} Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.375415 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2f7qv" event={"ID":"86213c9a-3227-4329-8a45-83e5b550f4a2","Type":"ContainerDied","Data":"15928b9a8051d1069220c2c86c2bcaf1705a019243d6d66a1ec21952713bc286"} Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.375438 4650 scope.go:117] "RemoveContainer" containerID="b978277320c9a47bb517019fadbf9cd3760a74ee3713f34720c5209fdc918251" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.375572 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2f7qv" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.379540 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86213c9a-3227-4329-8a45-83e5b550f4a2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "86213c9a-3227-4329-8a45-83e5b550f4a2" (UID: "86213c9a-3227-4329-8a45-83e5b550f4a2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.390326 4650 generic.go:334] "Generic (PLEG): container finished" podID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" containerID="dfc86c37de840e73c20c87484ad76a08a4cdb6230251cb60e858628a11fc2c75" exitCode=0 Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.390399 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn7fv" event={"ID":"029eaf1d-b5ae-4719-b2b1-243c9b8850db","Type":"ContainerDied","Data":"dfc86c37de840e73c20c87484ad76a08a4cdb6230251cb60e858628a11fc2c75"} Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.397564 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9b67j" podStartSLOduration=6.716829218 podStartE2EDuration="1m6.397543543s" podCreationTimestamp="2026-02-01 07:25:51 +0000 UTC" firstStartedPulling="2026-02-01 07:25:57.08944377 +0000 UTC m=+155.812542015" lastFinishedPulling="2026-02-01 07:26:56.770158095 +0000 UTC m=+215.493256340" observedRunningTime="2026-02-01 07:26:57.390869206 +0000 UTC m=+216.113967451" watchObservedRunningTime="2026-02-01 07:26:57.397543543 +0000 UTC m=+216.120641788" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.411879 4650 scope.go:117] "RemoveContainer" containerID="2cb50f87619fa1b00f2987dab92e685793b976d832e5f4872d3163d63e64a7f3" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.452698 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86213c9a-3227-4329-8a45-83e5b550f4a2-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.452730 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kchjx\" (UniqueName: \"kubernetes.io/projected/86213c9a-3227-4329-8a45-83e5b550f4a2-kube-api-access-kchjx\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.452740 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86213c9a-3227-4329-8a45-83e5b550f4a2-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.459109 4650 scope.go:117] "RemoveContainer" containerID="d8cfbbe3b3116fcf04181d1ea290313c5af0322a7f155eba325474867213d54e" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.480078 4650 scope.go:117] "RemoveContainer" containerID="b978277320c9a47bb517019fadbf9cd3760a74ee3713f34720c5209fdc918251" Feb 01 07:26:57 crc kubenswrapper[4650]: E0201 07:26:57.481615 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b978277320c9a47bb517019fadbf9cd3760a74ee3713f34720c5209fdc918251\": container with ID starting with b978277320c9a47bb517019fadbf9cd3760a74ee3713f34720c5209fdc918251 not found: ID does not exist" containerID="b978277320c9a47bb517019fadbf9cd3760a74ee3713f34720c5209fdc918251" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.481658 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b978277320c9a47bb517019fadbf9cd3760a74ee3713f34720c5209fdc918251"} err="failed to get container status \"b978277320c9a47bb517019fadbf9cd3760a74ee3713f34720c5209fdc918251\": rpc error: code = NotFound desc = could not find container \"b978277320c9a47bb517019fadbf9cd3760a74ee3713f34720c5209fdc918251\": container with 
ID starting with b978277320c9a47bb517019fadbf9cd3760a74ee3713f34720c5209fdc918251 not found: ID does not exist" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.481692 4650 scope.go:117] "RemoveContainer" containerID="2cb50f87619fa1b00f2987dab92e685793b976d832e5f4872d3163d63e64a7f3" Feb 01 07:26:57 crc kubenswrapper[4650]: E0201 07:26:57.482193 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2cb50f87619fa1b00f2987dab92e685793b976d832e5f4872d3163d63e64a7f3\": container with ID starting with 2cb50f87619fa1b00f2987dab92e685793b976d832e5f4872d3163d63e64a7f3 not found: ID does not exist" containerID="2cb50f87619fa1b00f2987dab92e685793b976d832e5f4872d3163d63e64a7f3" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.482218 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2cb50f87619fa1b00f2987dab92e685793b976d832e5f4872d3163d63e64a7f3"} err="failed to get container status \"2cb50f87619fa1b00f2987dab92e685793b976d832e5f4872d3163d63e64a7f3\": rpc error: code = NotFound desc = could not find container \"2cb50f87619fa1b00f2987dab92e685793b976d832e5f4872d3163d63e64a7f3\": container with ID starting with 2cb50f87619fa1b00f2987dab92e685793b976d832e5f4872d3163d63e64a7f3 not found: ID does not exist" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.482233 4650 scope.go:117] "RemoveContainer" containerID="d8cfbbe3b3116fcf04181d1ea290313c5af0322a7f155eba325474867213d54e" Feb 01 07:26:57 crc kubenswrapper[4650]: E0201 07:26:57.482509 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8cfbbe3b3116fcf04181d1ea290313c5af0322a7f155eba325474867213d54e\": container with ID starting with d8cfbbe3b3116fcf04181d1ea290313c5af0322a7f155eba325474867213d54e not found: ID does not exist" containerID="d8cfbbe3b3116fcf04181d1ea290313c5af0322a7f155eba325474867213d54e" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.482534 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8cfbbe3b3116fcf04181d1ea290313c5af0322a7f155eba325474867213d54e"} err="failed to get container status \"d8cfbbe3b3116fcf04181d1ea290313c5af0322a7f155eba325474867213d54e\": rpc error: code = NotFound desc = could not find container \"d8cfbbe3b3116fcf04181d1ea290313c5af0322a7f155eba325474867213d54e\": container with ID starting with d8cfbbe3b3116fcf04181d1ea290313c5af0322a7f155eba325474867213d54e not found: ID does not exist" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.706590 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2f7qv"] Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.719198 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2f7qv"] Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.973830 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ed3c67d-2427-4ee2-950c-0f705023db71" path="/var/lib/kubelet/pods/3ed3c67d-2427-4ee2-950c-0f705023db71/volumes" Feb 01 07:26:57 crc kubenswrapper[4650]: I0201 07:26:57.974856 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86213c9a-3227-4329-8a45-83e5b550f4a2" path="/var/lib/kubelet/pods/86213c9a-3227-4329-8a45-83e5b550f4a2/volumes" Feb 01 07:26:58 crc kubenswrapper[4650]: I0201 07:26:58.398212 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-5s2bx" event={"ID":"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c","Type":"ContainerStarted","Data":"99829fed30d2f9425cf2487b6deef0716b3842b7058957b35701ec36e20722d1"} Feb 01 07:26:58 crc kubenswrapper[4650]: I0201 07:26:58.402262 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn7fv" event={"ID":"029eaf1d-b5ae-4719-b2b1-243c9b8850db","Type":"ContainerStarted","Data":"1dd7b945981b2692d9dcb4aa24e2de9ea65565c506abcea4298ca896b618c859"} Feb 01 07:26:58 crc kubenswrapper[4650]: I0201 07:26:58.446381 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5s2bx" podStartSLOduration=3.68477168 podStartE2EDuration="1m4.446358789s" podCreationTimestamp="2026-02-01 07:25:54 +0000 UTC" firstStartedPulling="2026-02-01 07:25:57.039508232 +0000 UTC m=+155.762606477" lastFinishedPulling="2026-02-01 07:26:57.801095341 +0000 UTC m=+216.524193586" observedRunningTime="2026-02-01 07:26:58.423575587 +0000 UTC m=+217.146673832" watchObservedRunningTime="2026-02-01 07:26:58.446358789 +0000 UTC m=+217.169457034" Feb 01 07:26:58 crc kubenswrapper[4650]: I0201 07:26:58.446816 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xn7fv" podStartSLOduration=2.708869464 podStartE2EDuration="1m3.446808946s" podCreationTimestamp="2026-02-01 07:25:55 +0000 UTC" firstStartedPulling="2026-02-01 07:25:57.089373508 +0000 UTC m=+155.812471753" lastFinishedPulling="2026-02-01 07:26:57.82731299 +0000 UTC m=+216.550411235" observedRunningTime="2026-02-01 07:26:58.445136424 +0000 UTC m=+217.168234669" watchObservedRunningTime="2026-02-01 07:26:58.446808946 +0000 UTC m=+217.169907191" Feb 01 07:27:02 crc kubenswrapper[4650]: I0201 07:27:02.343667 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:27:02 crc kubenswrapper[4650]: I0201 07:27:02.344421 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:27:02 crc kubenswrapper[4650]: I0201 07:27:02.405361 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:27:02 crc kubenswrapper[4650]: I0201 07:27:02.475402 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:27:02 crc kubenswrapper[4650]: I0201 07:27:02.615994 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-r65td" podUID="416589cc-479e-45e0-8fad-2ccd30115769" containerName="oauth-openshift" containerID="cri-o://a10e818e58bba69286a8ae0a636d84cdcce22ddde9ed5a9353e50e240b09c324" gracePeriod=15 Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.431945 4650 generic.go:334] "Generic (PLEG): container finished" podID="416589cc-479e-45e0-8fad-2ccd30115769" containerID="a10e818e58bba69286a8ae0a636d84cdcce22ddde9ed5a9353e50e240b09c324" exitCode=0 Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.432869 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-r65td" event={"ID":"416589cc-479e-45e0-8fad-2ccd30115769","Type":"ContainerDied","Data":"a10e818e58bba69286a8ae0a636d84cdcce22ddde9ed5a9353e50e240b09c324"} Feb 01 07:27:03 crc kubenswrapper[4650]: 
I0201 07:27:03.558134 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9b67j"] Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.843527 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859488 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-login\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859535 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-service-ca\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859565 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-session\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859623 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-error\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859651 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-trusted-ca-bundle\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859676 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/416589cc-479e-45e0-8fad-2ccd30115769-audit-dir\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859708 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9v84t\" (UniqueName: \"kubernetes.io/projected/416589cc-479e-45e0-8fad-2ccd30115769-kube-api-access-9v84t\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859732 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-idp-0-file-data\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859757 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" 
(UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-cliconfig\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859779 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-provider-selection\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859818 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-audit-policies\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859837 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-serving-cert\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859859 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-router-certs\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.859881 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-ocp-branding-template\") pod \"416589cc-479e-45e0-8fad-2ccd30115769\" (UID: \"416589cc-479e-45e0-8fad-2ccd30115769\") " Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.860539 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.860921 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/416589cc-479e-45e0-8fad-2ccd30115769-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.861277 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.861679 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.861979 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.870529 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.871360 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.872829 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/416589cc-479e-45e0-8fad-2ccd30115769-kube-api-access-9v84t" (OuterVolumeSpecName: "kube-api-access-9v84t") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "kube-api-access-9v84t". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.881355 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.881936 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.882392 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.882717 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.888116 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.888397 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "416589cc-479e-45e0-8fad-2ccd30115769" (UID: "416589cc-479e-45e0-8fad-2ccd30115769"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961675 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961718 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961729 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961740 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961752 4650 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/416589cc-479e-45e0-8fad-2ccd30115769-audit-dir\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961762 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9v84t\" (UniqueName: \"kubernetes.io/projected/416589cc-479e-45e0-8fad-2ccd30115769-kube-api-access-9v84t\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961771 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961780 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961793 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961804 4650 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/416589cc-479e-45e0-8fad-2ccd30115769-audit-policies\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961815 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961828 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-router-certs\") on node 
\"crc\" DevicePath \"\"" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961838 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:03 crc kubenswrapper[4650]: I0201 07:27:03.961847 4650 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/416589cc-479e-45e0-8fad-2ccd30115769-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:04 crc kubenswrapper[4650]: I0201 07:27:04.440803 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-r65td" Feb 01 07:27:04 crc kubenswrapper[4650]: I0201 07:27:04.441798 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-r65td" event={"ID":"416589cc-479e-45e0-8fad-2ccd30115769","Type":"ContainerDied","Data":"e79a2e0b314e56079798feb15955d302ce60a9c18553879678c3570368a0c00f"} Feb 01 07:27:04 crc kubenswrapper[4650]: I0201 07:27:04.442086 4650 scope.go:117] "RemoveContainer" containerID="a10e818e58bba69286a8ae0a636d84cdcce22ddde9ed5a9353e50e240b09c324" Feb 01 07:27:04 crc kubenswrapper[4650]: I0201 07:27:04.469932 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-r65td"] Feb 01 07:27:04 crc kubenswrapper[4650]: I0201 07:27:04.473150 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-r65td"] Feb 01 07:27:05 crc kubenswrapper[4650]: I0201 07:27:05.102874 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:27:05 crc kubenswrapper[4650]: I0201 07:27:05.103865 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:27:05 crc kubenswrapper[4650]: I0201 07:27:05.161492 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:27:05 crc kubenswrapper[4650]: I0201 07:27:05.449967 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9b67j" podUID="6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" containerName="registry-server" containerID="cri-o://6b51d39d1974553e599e72aa4668fc43d5571c07dcb1053bec38ecc85ca9260c" gracePeriod=2 Feb 01 07:27:05 crc kubenswrapper[4650]: I0201 07:27:05.494384 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:27:05 crc kubenswrapper[4650]: I0201 07:27:05.494545 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:27:05 crc kubenswrapper[4650]: I0201 07:27:05.512075 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:27:05 crc kubenswrapper[4650]: I0201 07:27:05.579559 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:27:05 crc kubenswrapper[4650]: I0201 07:27:05.970602 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:27:05 crc kubenswrapper[4650]: I0201 07:27:05.994319 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hr7rj\" (UniqueName: \"kubernetes.io/projected/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-kube-api-access-hr7rj\") pod \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\" (UID: \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\") " Feb 01 07:27:05 crc kubenswrapper[4650]: I0201 07:27:05.994680 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-utilities\") pod \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\" (UID: \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\") " Feb 01 07:27:05 crc kubenswrapper[4650]: I0201 07:27:05.994736 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-catalog-content\") pod \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\" (UID: \"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561\") " Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.004758 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-kube-api-access-hr7rj" (OuterVolumeSpecName: "kube-api-access-hr7rj") pod "6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" (UID: "6c5c24b4-b4cc-4577-8cbd-c299f6fd5561"). InnerVolumeSpecName "kube-api-access-hr7rj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.005562 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-utilities" (OuterVolumeSpecName: "utilities") pod "6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" (UID: "6c5c24b4-b4cc-4577-8cbd-c299f6fd5561"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.005676 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="416589cc-479e-45e0-8fad-2ccd30115769" path="/var/lib/kubelet/pods/416589cc-479e-45e0-8fad-2ccd30115769/volumes" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.075224 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" (UID: "6c5c24b4-b4cc-4577-8cbd-c299f6fd5561"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.096285 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.096333 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.096350 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hr7rj\" (UniqueName: \"kubernetes.io/projected/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561-kube-api-access-hr7rj\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.471612 4650 generic.go:334] "Generic (PLEG): container finished" podID="6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" containerID="6b51d39d1974553e599e72aa4668fc43d5571c07dcb1053bec38ecc85ca9260c" exitCode=0 Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.471716 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9b67j" event={"ID":"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561","Type":"ContainerDied","Data":"6b51d39d1974553e599e72aa4668fc43d5571c07dcb1053bec38ecc85ca9260c"} Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.471745 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9b67j" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.471792 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9b67j" event={"ID":"6c5c24b4-b4cc-4577-8cbd-c299f6fd5561","Type":"ContainerDied","Data":"b1079126d19cd3c437247dfcb9c2e556e2329b7f2ccf19ddc2e676eb1e8f22c0"} Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.471819 4650 scope.go:117] "RemoveContainer" containerID="6b51d39d1974553e599e72aa4668fc43d5571c07dcb1053bec38ecc85ca9260c" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.502150 4650 scope.go:117] "RemoveContainer" containerID="cb6874608e5fecb1a37bf77e416487b7d1e36be3c7c98f500f4929de12aa8c05" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.530824 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9b67j"] Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.539760 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9b67j"] Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.546903 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.559468 4650 scope.go:117] "RemoveContainer" containerID="1518c7a0cff827656596b0006b19698e07a4cd0fd50470873ea020af8a80abd8" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.587526 4650 scope.go:117] "RemoveContainer" containerID="6b51d39d1974553e599e72aa4668fc43d5571c07dcb1053bec38ecc85ca9260c" Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.588517 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b51d39d1974553e599e72aa4668fc43d5571c07dcb1053bec38ecc85ca9260c\": container with ID starting with 
6b51d39d1974553e599e72aa4668fc43d5571c07dcb1053bec38ecc85ca9260c not found: ID does not exist" containerID="6b51d39d1974553e599e72aa4668fc43d5571c07dcb1053bec38ecc85ca9260c" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.588601 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b51d39d1974553e599e72aa4668fc43d5571c07dcb1053bec38ecc85ca9260c"} err="failed to get container status \"6b51d39d1974553e599e72aa4668fc43d5571c07dcb1053bec38ecc85ca9260c\": rpc error: code = NotFound desc = could not find container \"6b51d39d1974553e599e72aa4668fc43d5571c07dcb1053bec38ecc85ca9260c\": container with ID starting with 6b51d39d1974553e599e72aa4668fc43d5571c07dcb1053bec38ecc85ca9260c not found: ID does not exist" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.588641 4650 scope.go:117] "RemoveContainer" containerID="cb6874608e5fecb1a37bf77e416487b7d1e36be3c7c98f500f4929de12aa8c05" Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.589557 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb6874608e5fecb1a37bf77e416487b7d1e36be3c7c98f500f4929de12aa8c05\": container with ID starting with cb6874608e5fecb1a37bf77e416487b7d1e36be3c7c98f500f4929de12aa8c05 not found: ID does not exist" containerID="cb6874608e5fecb1a37bf77e416487b7d1e36be3c7c98f500f4929de12aa8c05" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.589592 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb6874608e5fecb1a37bf77e416487b7d1e36be3c7c98f500f4929de12aa8c05"} err="failed to get container status \"cb6874608e5fecb1a37bf77e416487b7d1e36be3c7c98f500f4929de12aa8c05\": rpc error: code = NotFound desc = could not find container \"cb6874608e5fecb1a37bf77e416487b7d1e36be3c7c98f500f4929de12aa8c05\": container with ID starting with cb6874608e5fecb1a37bf77e416487b7d1e36be3c7c98f500f4929de12aa8c05 not found: ID does not exist" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.589633 4650 scope.go:117] "RemoveContainer" containerID="1518c7a0cff827656596b0006b19698e07a4cd0fd50470873ea020af8a80abd8" Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.590085 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1518c7a0cff827656596b0006b19698e07a4cd0fd50470873ea020af8a80abd8\": container with ID starting with 1518c7a0cff827656596b0006b19698e07a4cd0fd50470873ea020af8a80abd8 not found: ID does not exist" containerID="1518c7a0cff827656596b0006b19698e07a4cd0fd50470873ea020af8a80abd8" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.590148 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1518c7a0cff827656596b0006b19698e07a4cd0fd50470873ea020af8a80abd8"} err="failed to get container status \"1518c7a0cff827656596b0006b19698e07a4cd0fd50470873ea020af8a80abd8\": rpc error: code = NotFound desc = could not find container \"1518c7a0cff827656596b0006b19698e07a4cd0fd50470873ea020af8a80abd8\": container with ID starting with 1518c7a0cff827656596b0006b19698e07a4cd0fd50470873ea020af8a80abd8 not found: ID does not exist" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.980341 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-65b7d57b79-fvmnk"] Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.980730 4650 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="416589cc-479e-45e0-8fad-2ccd30115769" containerName="oauth-openshift" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.980763 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="416589cc-479e-45e0-8fad-2ccd30115769" containerName="oauth-openshift" Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.980781 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="156438bd-994b-4c20-929e-19a2ff7a7be1" containerName="pruner" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.980796 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="156438bd-994b-4c20-929e-19a2ff7a7be1" containerName="pruner" Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.980815 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86213c9a-3227-4329-8a45-83e5b550f4a2" containerName="registry-server" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.980828 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="86213c9a-3227-4329-8a45-83e5b550f4a2" containerName="registry-server" Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.980851 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86213c9a-3227-4329-8a45-83e5b550f4a2" containerName="extract-content" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.980864 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="86213c9a-3227-4329-8a45-83e5b550f4a2" containerName="extract-content" Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.980882 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ed3c67d-2427-4ee2-950c-0f705023db71" containerName="extract-content" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.980895 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ed3c67d-2427-4ee2-950c-0f705023db71" containerName="extract-content" Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.980914 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" containerName="extract-utilities" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.980928 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" containerName="extract-utilities" Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.980954 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ed3c67d-2427-4ee2-950c-0f705023db71" containerName="registry-server" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.980969 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ed3c67d-2427-4ee2-950c-0f705023db71" containerName="registry-server" Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.980985 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" containerName="registry-server" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.981000 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" containerName="registry-server" Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.981017 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86213c9a-3227-4329-8a45-83e5b550f4a2" containerName="extract-utilities" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.981056 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="86213c9a-3227-4329-8a45-83e5b550f4a2" containerName="extract-utilities" Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.981078 4650 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="3ed3c67d-2427-4ee2-950c-0f705023db71" containerName="extract-utilities" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.981092 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ed3c67d-2427-4ee2-950c-0f705023db71" containerName="extract-utilities" Feb 01 07:27:06 crc kubenswrapper[4650]: E0201 07:27:06.981113 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" containerName="extract-content" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.981127 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" containerName="extract-content" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.981313 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="416589cc-479e-45e0-8fad-2ccd30115769" containerName="oauth-openshift" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.981338 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ed3c67d-2427-4ee2-950c-0f705023db71" containerName="registry-server" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.981360 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="156438bd-994b-4c20-929e-19a2ff7a7be1" containerName="pruner" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.981379 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="86213c9a-3227-4329-8a45-83e5b550f4a2" containerName="registry-server" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.981399 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" containerName="registry-server" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.982138 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.988245 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.989377 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.989717 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.990003 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.991377 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.991697 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.991909 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.994115 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.996196 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.996224 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Feb 01 07:27:06 crc kubenswrapper[4650]: I0201 07:27:06.997594 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.001635 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.005966 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-65b7d57b79-fvmnk"] Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.010803 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-session\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.010871 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-user-template-login\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.010919 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.010952 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hf99v\" (UniqueName: \"kubernetes.io/projected/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-kube-api-access-hf99v\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.010991 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-service-ca\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.011057 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.011147 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.011185 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.011244 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-router-certs\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.011292 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-audit-policies\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " 
pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.011363 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.011418 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.011470 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-user-template-error\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.011510 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-audit-dir\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.011640 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.012734 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.022266 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.113801 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.113903 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.113946 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.114002 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-router-certs\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.114072 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-audit-policies\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.114113 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.114153 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-user-template-error\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.114186 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-audit-dir\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.114236 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.114271 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-session\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.114302 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-user-template-login\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.114342 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hf99v\" (UniqueName: \"kubernetes.io/projected/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-kube-api-access-hf99v\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.114372 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.114406 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-service-ca\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.115464 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-service-ca\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.118184 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.119652 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.119710 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-audit-dir\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.119669 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-audit-policies\") pod 
\"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.123513 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-user-template-error\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.123792 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-session\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.125494 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.126409 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-user-template-login\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.127691 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-router-certs\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.128147 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.129597 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.143568 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-65b7d57b79-fvmnk\" 
(UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.147555 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hf99v\" (UniqueName: \"kubernetes.io/projected/c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a-kube-api-access-hf99v\") pod \"oauth-openshift-65b7d57b79-fvmnk\" (UID: \"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a\") " pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.162093 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.162171 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.162230 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.163080 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1"} pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.163181 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" containerID="cri-o://e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1" gracePeriod=600 Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.325588 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.505466 4650 generic.go:334] "Generic (PLEG): container finished" podID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerID="e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1" exitCode=0 Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.505573 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerDied","Data":"e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1"} Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.565674 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xn7fv"] Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.875722 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-65b7d57b79-fvmnk"] Feb 01 07:27:07 crc kubenswrapper[4650]: I0201 07:27:07.976464 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c5c24b4-b4cc-4577-8cbd-c299f6fd5561" path="/var/lib/kubelet/pods/6c5c24b4-b4cc-4577-8cbd-c299f6fd5561/volumes" Feb 01 07:27:08 crc kubenswrapper[4650]: I0201 07:27:08.517637 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"db279aeb24995ec5143fa01137b45dc7b7c1ab6084221190d10c8193ed14bc2e"} Feb 01 07:27:08 crc kubenswrapper[4650]: I0201 07:27:08.520338 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xn7fv" podUID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" containerName="registry-server" containerID="cri-o://1dd7b945981b2692d9dcb4aa24e2de9ea65565c506abcea4298ca896b618c859" gracePeriod=2 Feb 01 07:27:08 crc kubenswrapper[4650]: I0201 07:27:08.521274 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" event={"ID":"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a","Type":"ContainerStarted","Data":"e3b9a786c504dae88632a46e5cd5e9f5d6d3af6430d32c8d0bfc8fdab7ef5409"} Feb 01 07:27:08 crc kubenswrapper[4650]: I0201 07:27:08.521305 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" event={"ID":"c8998ecc-d48c-4f26-bbfa-d6a325ebdd6a","Type":"ContainerStarted","Data":"ac1fdd4cd1897572a883447b0ebc0bfc27dd400f2a6e57c331c636acefd44234"} Feb 01 07:27:08 crc kubenswrapper[4650]: I0201 07:27:08.521327 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:08 crc kubenswrapper[4650]: I0201 07:27:08.807402 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" Feb 01 07:27:08 crc kubenswrapper[4650]: I0201 07:27:08.850910 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-65b7d57b79-fvmnk" podStartSLOduration=31.850888239 podStartE2EDuration="31.850888239s" podCreationTimestamp="2026-02-01 07:26:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:27:08.578006165 +0000 UTC 
m=+227.301104410" watchObservedRunningTime="2026-02-01 07:27:08.850888239 +0000 UTC m=+227.573986484" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.150567 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.248691 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/029eaf1d-b5ae-4719-b2b1-243c9b8850db-catalog-content\") pod \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\" (UID: \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\") " Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.248813 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxrj2\" (UniqueName: \"kubernetes.io/projected/029eaf1d-b5ae-4719-b2b1-243c9b8850db-kube-api-access-bxrj2\") pod \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\" (UID: \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\") " Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.248962 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/029eaf1d-b5ae-4719-b2b1-243c9b8850db-utilities\") pod \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\" (UID: \"029eaf1d-b5ae-4719-b2b1-243c9b8850db\") " Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.249905 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/029eaf1d-b5ae-4719-b2b1-243c9b8850db-utilities" (OuterVolumeSpecName: "utilities") pod "029eaf1d-b5ae-4719-b2b1-243c9b8850db" (UID: "029eaf1d-b5ae-4719-b2b1-243c9b8850db"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.250205 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/029eaf1d-b5ae-4719-b2b1-243c9b8850db-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.256727 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/029eaf1d-b5ae-4719-b2b1-243c9b8850db-kube-api-access-bxrj2" (OuterVolumeSpecName: "kube-api-access-bxrj2") pod "029eaf1d-b5ae-4719-b2b1-243c9b8850db" (UID: "029eaf1d-b5ae-4719-b2b1-243c9b8850db"). InnerVolumeSpecName "kube-api-access-bxrj2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.352114 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxrj2\" (UniqueName: \"kubernetes.io/projected/029eaf1d-b5ae-4719-b2b1-243c9b8850db-kube-api-access-bxrj2\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.361206 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt"] Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.361462 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" podUID="bca26641-9399-4eb6-b2d2-59398c7a8a05" containerName="controller-manager" containerID="cri-o://1d9fc3fd5e6969f1472cdf1f216bf5817f6c3d3f9daa252c21916d7ffa409216" gracePeriod=30 Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.459789 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp"] Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.460088 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" podUID="99f68f70-14ee-4a44-9629-f32a18409edc" containerName="route-controller-manager" containerID="cri-o://adff36b140a94af5ce8823b4c7b6d44ce8edbaebe922d44895ad06018050acb9" gracePeriod=30 Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.529909 4650 generic.go:334] "Generic (PLEG): container finished" podID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" containerID="1dd7b945981b2692d9dcb4aa24e2de9ea65565c506abcea4298ca896b618c859" exitCode=0 Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.529997 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xn7fv" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.529973 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn7fv" event={"ID":"029eaf1d-b5ae-4719-b2b1-243c9b8850db","Type":"ContainerDied","Data":"1dd7b945981b2692d9dcb4aa24e2de9ea65565c506abcea4298ca896b618c859"} Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.530143 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn7fv" event={"ID":"029eaf1d-b5ae-4719-b2b1-243c9b8850db","Type":"ContainerDied","Data":"dea343751e02552d9c62b3c238b213e5f52413b6ef3a51dfa7abb26f5cbdba95"} Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.530162 4650 scope.go:117] "RemoveContainer" containerID="1dd7b945981b2692d9dcb4aa24e2de9ea65565c506abcea4298ca896b618c859" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.550311 4650 scope.go:117] "RemoveContainer" containerID="dfc86c37de840e73c20c87484ad76a08a4cdb6230251cb60e858628a11fc2c75" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.584835 4650 scope.go:117] "RemoveContainer" containerID="77664b6f7737ab431aae7358955a646a9e6243dc3a382d1acf6295e89be87de5" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.603851 4650 scope.go:117] "RemoveContainer" containerID="1dd7b945981b2692d9dcb4aa24e2de9ea65565c506abcea4298ca896b618c859" Feb 01 07:27:09 crc kubenswrapper[4650]: E0201 07:27:09.604896 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1dd7b945981b2692d9dcb4aa24e2de9ea65565c506abcea4298ca896b618c859\": container with ID starting with 1dd7b945981b2692d9dcb4aa24e2de9ea65565c506abcea4298ca896b618c859 not found: ID does not exist" containerID="1dd7b945981b2692d9dcb4aa24e2de9ea65565c506abcea4298ca896b618c859" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.604969 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dd7b945981b2692d9dcb4aa24e2de9ea65565c506abcea4298ca896b618c859"} err="failed to get container status \"1dd7b945981b2692d9dcb4aa24e2de9ea65565c506abcea4298ca896b618c859\": rpc error: code = NotFound desc = could not find container \"1dd7b945981b2692d9dcb4aa24e2de9ea65565c506abcea4298ca896b618c859\": container with ID starting with 1dd7b945981b2692d9dcb4aa24e2de9ea65565c506abcea4298ca896b618c859 not found: ID does not exist" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.605013 4650 scope.go:117] "RemoveContainer" containerID="dfc86c37de840e73c20c87484ad76a08a4cdb6230251cb60e858628a11fc2c75" Feb 01 07:27:09 crc kubenswrapper[4650]: E0201 07:27:09.606769 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dfc86c37de840e73c20c87484ad76a08a4cdb6230251cb60e858628a11fc2c75\": container with ID starting with dfc86c37de840e73c20c87484ad76a08a4cdb6230251cb60e858628a11fc2c75 not found: ID does not exist" containerID="dfc86c37de840e73c20c87484ad76a08a4cdb6230251cb60e858628a11fc2c75" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.606802 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfc86c37de840e73c20c87484ad76a08a4cdb6230251cb60e858628a11fc2c75"} err="failed to get container status \"dfc86c37de840e73c20c87484ad76a08a4cdb6230251cb60e858628a11fc2c75\": rpc error: code = NotFound desc = could not find container 
\"dfc86c37de840e73c20c87484ad76a08a4cdb6230251cb60e858628a11fc2c75\": container with ID starting with dfc86c37de840e73c20c87484ad76a08a4cdb6230251cb60e858628a11fc2c75 not found: ID does not exist" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.606829 4650 scope.go:117] "RemoveContainer" containerID="77664b6f7737ab431aae7358955a646a9e6243dc3a382d1acf6295e89be87de5" Feb 01 07:27:09 crc kubenswrapper[4650]: E0201 07:27:09.607262 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77664b6f7737ab431aae7358955a646a9e6243dc3a382d1acf6295e89be87de5\": container with ID starting with 77664b6f7737ab431aae7358955a646a9e6243dc3a382d1acf6295e89be87de5 not found: ID does not exist" containerID="77664b6f7737ab431aae7358955a646a9e6243dc3a382d1acf6295e89be87de5" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.607284 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77664b6f7737ab431aae7358955a646a9e6243dc3a382d1acf6295e89be87de5"} err="failed to get container status \"77664b6f7737ab431aae7358955a646a9e6243dc3a382d1acf6295e89be87de5\": rpc error: code = NotFound desc = could not find container \"77664b6f7737ab431aae7358955a646a9e6243dc3a382d1acf6295e89be87de5\": container with ID starting with 77664b6f7737ab431aae7358955a646a9e6243dc3a382d1acf6295e89be87de5 not found: ID does not exist" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.840810 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/029eaf1d-b5ae-4719-b2b1-243c9b8850db-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "029eaf1d-b5ae-4719-b2b1-243c9b8850db" (UID: "029eaf1d-b5ae-4719-b2b1-243c9b8850db"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:27:09 crc kubenswrapper[4650]: I0201 07:27:09.859615 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/029eaf1d-b5ae-4719-b2b1-243c9b8850db-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:10 crc kubenswrapper[4650]: I0201 07:27:10.161329 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xn7fv"] Feb 01 07:27:10 crc kubenswrapper[4650]: I0201 07:27:10.169126 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xn7fv"] Feb 01 07:27:10 crc kubenswrapper[4650]: I0201 07:27:10.560326 4650 generic.go:334] "Generic (PLEG): container finished" podID="99f68f70-14ee-4a44-9629-f32a18409edc" containerID="adff36b140a94af5ce8823b4c7b6d44ce8edbaebe922d44895ad06018050acb9" exitCode=0 Feb 01 07:27:10 crc kubenswrapper[4650]: I0201 07:27:10.560440 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" event={"ID":"99f68f70-14ee-4a44-9629-f32a18409edc","Type":"ContainerDied","Data":"adff36b140a94af5ce8823b4c7b6d44ce8edbaebe922d44895ad06018050acb9"} Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.367203 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.409119 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9"] Feb 01 07:27:11 crc kubenswrapper[4650]: E0201 07:27:11.409421 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bca26641-9399-4eb6-b2d2-59398c7a8a05" containerName="controller-manager" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.409437 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="bca26641-9399-4eb6-b2d2-59398c7a8a05" containerName="controller-manager" Feb 01 07:27:11 crc kubenswrapper[4650]: E0201 07:27:11.409456 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" containerName="extract-content" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.409465 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" containerName="extract-content" Feb 01 07:27:11 crc kubenswrapper[4650]: E0201 07:27:11.409480 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" containerName="extract-utilities" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.409489 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" containerName="extract-utilities" Feb 01 07:27:11 crc kubenswrapper[4650]: E0201 07:27:11.409503 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" containerName="registry-server" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.409511 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" containerName="registry-server" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.409625 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="bca26641-9399-4eb6-b2d2-59398c7a8a05" containerName="controller-manager" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.409638 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" containerName="registry-server" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.410152 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.424118 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.444076 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9"] Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.491247 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-proxy-ca-bundles\") pod \"bca26641-9399-4eb6-b2d2-59398c7a8a05\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.491754 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-client-ca\") pod \"bca26641-9399-4eb6-b2d2-59398c7a8a05\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.492348 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-client-ca" (OuterVolumeSpecName: "client-ca") pod "bca26641-9399-4eb6-b2d2-59398c7a8a05" (UID: "bca26641-9399-4eb6-b2d2-59398c7a8a05"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.492392 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "bca26641-9399-4eb6-b2d2-59398c7a8a05" (UID: "bca26641-9399-4eb6-b2d2-59398c7a8a05"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.492657 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kn2pg\" (UniqueName: \"kubernetes.io/projected/bca26641-9399-4eb6-b2d2-59398c7a8a05-kube-api-access-kn2pg\") pod \"bca26641-9399-4eb6-b2d2-59398c7a8a05\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.493719 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bca26641-9399-4eb6-b2d2-59398c7a8a05-serving-cert\") pod \"bca26641-9399-4eb6-b2d2-59398c7a8a05\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.493844 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-config\") pod \"bca26641-9399-4eb6-b2d2-59398c7a8a05\" (UID: \"bca26641-9399-4eb6-b2d2-59398c7a8a05\") " Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.494161 4650 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-client-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.494232 4650 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.494829 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-config" (OuterVolumeSpecName: "config") pod "bca26641-9399-4eb6-b2d2-59398c7a8a05" (UID: "bca26641-9399-4eb6-b2d2-59398c7a8a05"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.499239 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bca26641-9399-4eb6-b2d2-59398c7a8a05-kube-api-access-kn2pg" (OuterVolumeSpecName: "kube-api-access-kn2pg") pod "bca26641-9399-4eb6-b2d2-59398c7a8a05" (UID: "bca26641-9399-4eb6-b2d2-59398c7a8a05"). InnerVolumeSpecName "kube-api-access-kn2pg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.500742 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bca26641-9399-4eb6-b2d2-59398c7a8a05-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bca26641-9399-4eb6-b2d2-59398c7a8a05" (UID: "bca26641-9399-4eb6-b2d2-59398c7a8a05"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.574552 4650 generic.go:334] "Generic (PLEG): container finished" podID="bca26641-9399-4eb6-b2d2-59398c7a8a05" containerID="1d9fc3fd5e6969f1472cdf1f216bf5817f6c3d3f9daa252c21916d7ffa409216" exitCode=0 Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.575360 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" event={"ID":"bca26641-9399-4eb6-b2d2-59398c7a8a05","Type":"ContainerDied","Data":"1d9fc3fd5e6969f1472cdf1f216bf5817f6c3d3f9daa252c21916d7ffa409216"} Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.575522 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" event={"ID":"bca26641-9399-4eb6-b2d2-59398c7a8a05","Type":"ContainerDied","Data":"ae11e9d80109da830f9d7b8f3e51c77ce1860c5a6c1e97785264b5aa674fc806"} Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.575622 4650 scope.go:117] "RemoveContainer" containerID="1d9fc3fd5e6969f1472cdf1f216bf5817f6c3d3f9daa252c21916d7ffa409216" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.575892 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.595008 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rfrf\" (UniqueName: \"kubernetes.io/projected/99f68f70-14ee-4a44-9629-f32a18409edc-kube-api-access-6rfrf\") pod \"99f68f70-14ee-4a44-9629-f32a18409edc\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.595188 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99f68f70-14ee-4a44-9629-f32a18409edc-config\") pod \"99f68f70-14ee-4a44-9629-f32a18409edc\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.595431 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/99f68f70-14ee-4a44-9629-f32a18409edc-client-ca\") pod \"99f68f70-14ee-4a44-9629-f32a18409edc\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.595533 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/99f68f70-14ee-4a44-9629-f32a18409edc-serving-cert\") pod \"99f68f70-14ee-4a44-9629-f32a18409edc\" (UID: \"99f68f70-14ee-4a44-9629-f32a18409edc\") " Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.595839 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-client-ca\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.595984 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02301c00-6a7a-457c-8664-32ebc116419e-serving-cert\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: 
\"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.596122 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-proxy-ca-bundles\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.596243 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fmxf\" (UniqueName: \"kubernetes.io/projected/02301c00-6a7a-457c-8664-32ebc116419e-kube-api-access-6fmxf\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.596329 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-config\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.596474 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kn2pg\" (UniqueName: \"kubernetes.io/projected/bca26641-9399-4eb6-b2d2-59398c7a8a05-kube-api-access-kn2pg\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.596570 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bca26641-9399-4eb6-b2d2-59398c7a8a05-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.596635 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bca26641-9399-4eb6-b2d2-59398c7a8a05-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.609705 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99f68f70-14ee-4a44-9629-f32a18409edc-config" (OuterVolumeSpecName: "config") pod "99f68f70-14ee-4a44-9629-f32a18409edc" (UID: "99f68f70-14ee-4a44-9629-f32a18409edc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.610289 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99f68f70-14ee-4a44-9629-f32a18409edc-client-ca" (OuterVolumeSpecName: "client-ca") pod "99f68f70-14ee-4a44-9629-f32a18409edc" (UID: "99f68f70-14ee-4a44-9629-f32a18409edc"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.611412 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" event={"ID":"99f68f70-14ee-4a44-9629-f32a18409edc","Type":"ContainerDied","Data":"30b08df0a4fa53a4dcb4c4aab12c0a1710cbaeb3b73fe1adadf56f1445052127"} Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.611539 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.610834 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99f68f70-14ee-4a44-9629-f32a18409edc-kube-api-access-6rfrf" (OuterVolumeSpecName: "kube-api-access-6rfrf") pod "99f68f70-14ee-4a44-9629-f32a18409edc" (UID: "99f68f70-14ee-4a44-9629-f32a18409edc"). InnerVolumeSpecName "kube-api-access-6rfrf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.619570 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99f68f70-14ee-4a44-9629-f32a18409edc-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "99f68f70-14ee-4a44-9629-f32a18409edc" (UID: "99f68f70-14ee-4a44-9629-f32a18409edc"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.638063 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt"] Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.639174 4650 scope.go:117] "RemoveContainer" containerID="1d9fc3fd5e6969f1472cdf1f216bf5817f6c3d3f9daa252c21916d7ffa409216" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.641717 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-655fc9d5bc-wqxtt"] Feb 01 07:27:11 crc kubenswrapper[4650]: E0201 07:27:11.650553 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d9fc3fd5e6969f1472cdf1f216bf5817f6c3d3f9daa252c21916d7ffa409216\": container with ID starting with 1d9fc3fd5e6969f1472cdf1f216bf5817f6c3d3f9daa252c21916d7ffa409216 not found: ID does not exist" containerID="1d9fc3fd5e6969f1472cdf1f216bf5817f6c3d3f9daa252c21916d7ffa409216" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.650618 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d9fc3fd5e6969f1472cdf1f216bf5817f6c3d3f9daa252c21916d7ffa409216"} err="failed to get container status \"1d9fc3fd5e6969f1472cdf1f216bf5817f6c3d3f9daa252c21916d7ffa409216\": rpc error: code = NotFound desc = could not find container \"1d9fc3fd5e6969f1472cdf1f216bf5817f6c3d3f9daa252c21916d7ffa409216\": container with ID starting with 1d9fc3fd5e6969f1472cdf1f216bf5817f6c3d3f9daa252c21916d7ffa409216 not found: ID does not exist" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.650686 4650 scope.go:117] "RemoveContainer" containerID="adff36b140a94af5ce8823b4c7b6d44ce8edbaebe922d44895ad06018050acb9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.698467 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-proxy-ca-bundles\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.698540 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fmxf\" (UniqueName: \"kubernetes.io/projected/02301c00-6a7a-457c-8664-32ebc116419e-kube-api-access-6fmxf\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: 
\"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.698573 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-config\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.698628 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-client-ca\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.698712 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02301c00-6a7a-457c-8664-32ebc116419e-serving-cert\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.698769 4650 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/99f68f70-14ee-4a44-9629-f32a18409edc-client-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.698783 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/99f68f70-14ee-4a44-9629-f32a18409edc-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.698797 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rfrf\" (UniqueName: \"kubernetes.io/projected/99f68f70-14ee-4a44-9629-f32a18409edc-kube-api-access-6rfrf\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.698811 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99f68f70-14ee-4a44-9629-f32a18409edc-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.700181 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-proxy-ca-bundles\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.701193 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-client-ca\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.701413 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-config\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " 
pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.704665 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02301c00-6a7a-457c-8664-32ebc116419e-serving-cert\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.719601 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fmxf\" (UniqueName: \"kubernetes.io/projected/02301c00-6a7a-457c-8664-32ebc116419e-kube-api-access-6fmxf\") pod \"controller-manager-58dd6d96c8-5d5z9\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.744390 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.946295 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp"] Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.953297 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c8f774cf7-m4sbp"] Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.976860 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="029eaf1d-b5ae-4719-b2b1-243c9b8850db" path="/var/lib/kubelet/pods/029eaf1d-b5ae-4719-b2b1-243c9b8850db/volumes" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.977741 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99f68f70-14ee-4a44-9629-f32a18409edc" path="/var/lib/kubelet/pods/99f68f70-14ee-4a44-9629-f32a18409edc/volumes" Feb 01 07:27:11 crc kubenswrapper[4650]: I0201 07:27:11.978411 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bca26641-9399-4eb6-b2d2-59398c7a8a05" path="/var/lib/kubelet/pods/bca26641-9399-4eb6-b2d2-59398c7a8a05/volumes" Feb 01 07:27:12 crc kubenswrapper[4650]: I0201 07:27:12.191776 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9"] Feb 01 07:27:12 crc kubenswrapper[4650]: W0201 07:27:12.199930 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod02301c00_6a7a_457c_8664_32ebc116419e.slice/crio-d9d2d54c8e23a59f080567bfc371e6ef70eec0566974baf1254ce64a32ee2087 WatchSource:0}: Error finding container d9d2d54c8e23a59f080567bfc371e6ef70eec0566974baf1254ce64a32ee2087: Status 404 returned error can't find the container with id d9d2d54c8e23a59f080567bfc371e6ef70eec0566974baf1254ce64a32ee2087 Feb 01 07:27:12 crc kubenswrapper[4650]: I0201 07:27:12.618954 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" event={"ID":"02301c00-6a7a-457c-8664-32ebc116419e","Type":"ContainerStarted","Data":"2792904a02179895e4da561912b8100a04e0446c1354b7856009b4d2c548926c"} Feb 01 07:27:12 crc kubenswrapper[4650]: I0201 07:27:12.619364 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" 
event={"ID":"02301c00-6a7a-457c-8664-32ebc116419e","Type":"ContainerStarted","Data":"d9d2d54c8e23a59f080567bfc371e6ef70eec0566974baf1254ce64a32ee2087"} Feb 01 07:27:12 crc kubenswrapper[4650]: I0201 07:27:12.619395 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:12 crc kubenswrapper[4650]: I0201 07:27:12.642862 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" podStartSLOduration=3.6428430069999997 podStartE2EDuration="3.642843007s" podCreationTimestamp="2026-02-01 07:27:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:27:12.638536148 +0000 UTC m=+231.361634393" watchObservedRunningTime="2026-02-01 07:27:12.642843007 +0000 UTC m=+231.365941252" Feb 01 07:27:12 crc kubenswrapper[4650]: I0201 07:27:12.646633 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:27:13 crc kubenswrapper[4650]: I0201 07:27:13.979455 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8"] Feb 01 07:27:13 crc kubenswrapper[4650]: E0201 07:27:13.980209 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99f68f70-14ee-4a44-9629-f32a18409edc" containerName="route-controller-manager" Feb 01 07:27:13 crc kubenswrapper[4650]: I0201 07:27:13.980226 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="99f68f70-14ee-4a44-9629-f32a18409edc" containerName="route-controller-manager" Feb 01 07:27:13 crc kubenswrapper[4650]: I0201 07:27:13.980330 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="99f68f70-14ee-4a44-9629-f32a18409edc" containerName="route-controller-manager" Feb 01 07:27:13 crc kubenswrapper[4650]: I0201 07:27:13.980778 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:13 crc kubenswrapper[4650]: I0201 07:27:13.983496 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 01 07:27:13 crc kubenswrapper[4650]: I0201 07:27:13.983749 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 01 07:27:13 crc kubenswrapper[4650]: I0201 07:27:13.988081 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 01 07:27:13 crc kubenswrapper[4650]: I0201 07:27:13.988250 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 01 07:27:13 crc kubenswrapper[4650]: I0201 07:27:13.989204 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 01 07:27:13 crc kubenswrapper[4650]: I0201 07:27:13.991430 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 01 07:27:13 crc kubenswrapper[4650]: I0201 07:27:13.997712 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8"] Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.137378 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-serving-cert\") pod \"route-controller-manager-6cdd86b674-lrjm8\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.137445 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-client-ca\") pod \"route-controller-manager-6cdd86b674-lrjm8\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.137486 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-config\") pod \"route-controller-manager-6cdd86b674-lrjm8\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.137510 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6wdp\" (UniqueName: \"kubernetes.io/projected/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-kube-api-access-c6wdp\") pod \"route-controller-manager-6cdd86b674-lrjm8\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.239571 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-config\") pod 
\"route-controller-manager-6cdd86b674-lrjm8\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.239647 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6wdp\" (UniqueName: \"kubernetes.io/projected/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-kube-api-access-c6wdp\") pod \"route-controller-manager-6cdd86b674-lrjm8\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.239718 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-serving-cert\") pod \"route-controller-manager-6cdd86b674-lrjm8\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.239747 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-client-ca\") pod \"route-controller-manager-6cdd86b674-lrjm8\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.240901 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-client-ca\") pod \"route-controller-manager-6cdd86b674-lrjm8\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.241705 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-config\") pod \"route-controller-manager-6cdd86b674-lrjm8\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.249309 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-serving-cert\") pod \"route-controller-manager-6cdd86b674-lrjm8\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.265666 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6wdp\" (UniqueName: \"kubernetes.io/projected/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-kube-api-access-c6wdp\") pod \"route-controller-manager-6cdd86b674-lrjm8\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.307166 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:14 crc kubenswrapper[4650]: I0201 07:27:14.769899 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8"] Feb 01 07:27:14 crc kubenswrapper[4650]: W0201 07:27:14.789072 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a28b7f9_5f4b_4fc4_a312_98fc165436ff.slice/crio-936a25b38daf7be0b67a3023ae25f8e5034c0492990d9ba7841e3781991cd285 WatchSource:0}: Error finding container 936a25b38daf7be0b67a3023ae25f8e5034c0492990d9ba7841e3781991cd285: Status 404 returned error can't find the container with id 936a25b38daf7be0b67a3023ae25f8e5034c0492990d9ba7841e3781991cd285 Feb 01 07:27:15 crc kubenswrapper[4650]: I0201 07:27:15.642901 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" event={"ID":"6a28b7f9-5f4b-4fc4-a312-98fc165436ff","Type":"ContainerStarted","Data":"2ca6bb3257b072c2f9a4e17bfbe75ebabac88d728c190f97c675f2f683eeddc8"} Feb 01 07:27:15 crc kubenswrapper[4650]: I0201 07:27:15.643369 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" event={"ID":"6a28b7f9-5f4b-4fc4-a312-98fc165436ff","Type":"ContainerStarted","Data":"936a25b38daf7be0b67a3023ae25f8e5034c0492990d9ba7841e3781991cd285"} Feb 01 07:27:15 crc kubenswrapper[4650]: I0201 07:27:15.662055 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" podStartSLOduration=6.662008578 podStartE2EDuration="6.662008578s" podCreationTimestamp="2026-02-01 07:27:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:27:15.65937215 +0000 UTC m=+234.382470415" watchObservedRunningTime="2026-02-01 07:27:15.662008578 +0000 UTC m=+234.385106833" Feb 01 07:27:16 crc kubenswrapper[4650]: I0201 07:27:16.651088 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:16 crc kubenswrapper[4650]: I0201 07:27:16.658252 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.950464 4650 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.952345 4650 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.952563 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.952937 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c" gracePeriod=15 Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.953095 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28" gracePeriod=15 Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.953231 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e" gracePeriod=15 Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.953246 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361" gracePeriod=15 Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.952974 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f" gracePeriod=15 Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.954948 4650 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 01 07:27:18 crc kubenswrapper[4650]: E0201 07:27:18.955405 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.955441 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Feb 01 07:27:18 crc kubenswrapper[4650]: E0201 07:27:18.955512 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.955534 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Feb 01 07:27:18 crc kubenswrapper[4650]: E0201 07:27:18.955563 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.955580 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 01 07:27:18 crc kubenswrapper[4650]: E0201 07:27:18.955601 4650 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.955618 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Feb 01 07:27:18 crc kubenswrapper[4650]: E0201 07:27:18.955644 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.955658 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Feb 01 07:27:18 crc kubenswrapper[4650]: E0201 07:27:18.955681 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.955699 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 01 07:27:18 crc kubenswrapper[4650]: E0201 07:27:18.955736 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.955752 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.955976 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.956007 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.956074 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.956103 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.956137 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Feb 01 07:27:18 crc kubenswrapper[4650]: I0201 07:27:18.956683 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.061482 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.061546 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 
07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.061616 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.080450 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.137859 4650 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" start-of-body= Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.137984 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.162952 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.164260 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.164366 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.164403 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.164426 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.164452 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.164495 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.164520 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.164910 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.164952 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.164982 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.266440 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.267176 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.267461 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.267753 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: 
\"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.268000 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.267685 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.267408 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.267943 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.266987 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.268236 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.379590 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:27:19 crc kubenswrapper[4650]: W0201 07:27:19.409183 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-653ea9c7605a2989548d020c3e34c937e3906f018040f36bbfaa7a31bd0318a1 WatchSource:0}: Error finding container 653ea9c7605a2989548d020c3e34c937e3906f018040f36bbfaa7a31bd0318a1: Status 404 returned error can't find the container with id 653ea9c7605a2989548d020c3e34c937e3906f018040f36bbfaa7a31bd0318a1 Feb 01 07:27:19 crc kubenswrapper[4650]: E0201 07:27:19.412130 4650 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.51:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.18900eb890212639 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-01 07:27:19.411467833 +0000 UTC m=+238.134566088,LastTimestamp:2026-02-01 07:27:19.411467833 +0000 UTC m=+238.134566088,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.676230 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"653ea9c7605a2989548d020c3e34c937e3906f018040f36bbfaa7a31bd0318a1"} Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.679741 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.681973 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.683268 4650 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f" exitCode=0 Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.683323 4650 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361" exitCode=0 Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.683345 4650 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28" exitCode=0 Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.683366 4650 generic.go:334] "Generic (PLEG): container finished" 
podID="f4b27818a5e8e43d0dc095d08835c792" containerID="dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e" exitCode=2 Feb 01 07:27:19 crc kubenswrapper[4650]: I0201 07:27:19.683412 4650 scope.go:117] "RemoveContainer" containerID="de145ed9e2a5d070122fb8d07b9ef3bb51b12e053487d8ec5bfcc619bec02c29" Feb 01 07:27:20 crc kubenswrapper[4650]: I0201 07:27:20.693441 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"d974d6eec98320be9b5c743d6410901fd4ed3fc027f4e5312b65d6b4db55e82c"} Feb 01 07:27:20 crc kubenswrapper[4650]: I0201 07:27:20.694581 4650 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:20 crc kubenswrapper[4650]: I0201 07:27:20.697312 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 01 07:27:21 crc kubenswrapper[4650]: I0201 07:27:21.711256 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 01 07:27:21 crc kubenswrapper[4650]: I0201 07:27:21.713662 4650 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c" exitCode=0 Feb 01 07:27:21 crc kubenswrapper[4650]: I0201 07:27:21.972457 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 01 07:27:21 crc kubenswrapper[4650]: I0201 07:27:21.974396 4650 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:21 crc kubenswrapper[4650]: I0201 07:27:21.976928 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:21 crc kubenswrapper[4650]: I0201 07:27:21.977879 4650 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:21 crc kubenswrapper[4650]: I0201 07:27:21.978396 4650 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.116500 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.116945 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.117103 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.116623 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.117864 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.117894 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.218573 4650 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.218603 4650 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.218613 4650 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.728514 4650 scope.go:117] "RemoveContainer" containerID="9f037a76c03c7da82557324b45ac0d37d599360ac58eb10fc541e47770fcdf9f" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.728585 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.731213 4650 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.731614 4650 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.753715 4650 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.753985 4650 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.758584 4650 scope.go:117] "RemoveContainer" containerID="d7745224501b9b23fad68d8579e074d156afed7363d843a58f12abdb0833f361" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.779479 4650 scope.go:117] "RemoveContainer" containerID="2fe118c002f0e3616d7b231e3dc0f3ee07b2767b367ee19a3f53b962aa0dfa28" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.806318 4650 scope.go:117] "RemoveContainer" containerID="dbbde9d0034a5f2c73f095900ec85e17ec5da4c32c3dc78787ab21170dd1544e" Feb 01 07:27:22 crc kubenswrapper[4650]: I0201 07:27:22.829770 4650 scope.go:117] "RemoveContainer" containerID="5c5b72e4bd7a9afc257c1b5c551311186e4f7892b2a49cdb8d8f0524bd55af1c" Feb 01 07:27:22 crc 
kubenswrapper[4650]: I0201 07:27:22.857371 4650 scope.go:117] "RemoveContainer" containerID="a8e1146abb22c9e4665e3e7a75e3c0ced7e6ab1b93f7698c16d8469c92dbe88d" Feb 01 07:27:23 crc kubenswrapper[4650]: E0201 07:27:23.416906 4650 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:23 crc kubenswrapper[4650]: E0201 07:27:23.417834 4650 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:23 crc kubenswrapper[4650]: E0201 07:27:23.418624 4650 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:23 crc kubenswrapper[4650]: E0201 07:27:23.419152 4650 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:23 crc kubenswrapper[4650]: E0201 07:27:23.419861 4650 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:23 crc kubenswrapper[4650]: I0201 07:27:23.419933 4650 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Feb 01 07:27:23 crc kubenswrapper[4650]: E0201 07:27:23.420485 4650 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="200ms" Feb 01 07:27:23 crc kubenswrapper[4650]: E0201 07:27:23.621485 4650 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="400ms" Feb 01 07:27:23 crc kubenswrapper[4650]: I0201 07:27:23.990511 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Feb 01 07:27:24 crc kubenswrapper[4650]: E0201 07:27:24.022517 4650 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="800ms" Feb 01 07:27:24 crc kubenswrapper[4650]: I0201 07:27:24.743451 4650 generic.go:334] "Generic (PLEG): container finished" podID="285b9d60-7e2e-4df8-811a-ddc59b103d1e" containerID="b96d3b2242b4b243eb8c9cc6cd964e62def33fffb2250364fbc55c336dcb9ebf" exitCode=0 Feb 01 07:27:24 crc kubenswrapper[4650]: I0201 07:27:24.743518 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" 
event={"ID":"285b9d60-7e2e-4df8-811a-ddc59b103d1e","Type":"ContainerDied","Data":"b96d3b2242b4b243eb8c9cc6cd964e62def33fffb2250364fbc55c336dcb9ebf"} Feb 01 07:27:24 crc kubenswrapper[4650]: I0201 07:27:24.745218 4650 status_manager.go:851] "Failed to get status for pod" podUID="285b9d60-7e2e-4df8-811a-ddc59b103d1e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:24 crc kubenswrapper[4650]: I0201 07:27:24.746046 4650 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:24 crc kubenswrapper[4650]: E0201 07:27:24.824432 4650 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="1.6s" Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.244705 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.246168 4650 status_manager.go:851] "Failed to get status for pod" podUID="285b9d60-7e2e-4df8-811a-ddc59b103d1e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.246428 4650 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.383823 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/285b9d60-7e2e-4df8-811a-ddc59b103d1e-var-lock\") pod \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\" (UID: \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\") " Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.384471 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/285b9d60-7e2e-4df8-811a-ddc59b103d1e-kubelet-dir\") pod \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\" (UID: \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\") " Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.384550 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/285b9d60-7e2e-4df8-811a-ddc59b103d1e-kube-api-access\") pod \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\" (UID: \"285b9d60-7e2e-4df8-811a-ddc59b103d1e\") " Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.384185 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/285b9d60-7e2e-4df8-811a-ddc59b103d1e-var-lock" (OuterVolumeSpecName: 
"var-lock") pod "285b9d60-7e2e-4df8-811a-ddc59b103d1e" (UID: "285b9d60-7e2e-4df8-811a-ddc59b103d1e"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.384995 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/285b9d60-7e2e-4df8-811a-ddc59b103d1e-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "285b9d60-7e2e-4df8-811a-ddc59b103d1e" (UID: "285b9d60-7e2e-4df8-811a-ddc59b103d1e"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.396932 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/285b9d60-7e2e-4df8-811a-ddc59b103d1e-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "285b9d60-7e2e-4df8-811a-ddc59b103d1e" (UID: "285b9d60-7e2e-4df8-811a-ddc59b103d1e"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:27:26 crc kubenswrapper[4650]: E0201 07:27:26.425292 4650 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="3.2s" Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.486019 4650 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/285b9d60-7e2e-4df8-811a-ddc59b103d1e-var-lock\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.486132 4650 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/285b9d60-7e2e-4df8-811a-ddc59b103d1e-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.486141 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/285b9d60-7e2e-4df8-811a-ddc59b103d1e-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.943611 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"285b9d60-7e2e-4df8-811a-ddc59b103d1e","Type":"ContainerDied","Data":"8ca9623a850773c08b9b9085bdd633aad2461be67e9ef916b4ec987e0f3e750a"} Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.943657 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ca9623a850773c08b9b9085bdd633aad2461be67e9ef916b4ec987e0f3e750a" Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.943725 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.961586 4650 status_manager.go:851] "Failed to get status for pod" podUID="285b9d60-7e2e-4df8-811a-ddc59b103d1e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:26 crc kubenswrapper[4650]: I0201 07:27:26.962897 4650 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:27 crc kubenswrapper[4650]: E0201 07:27:27.450617 4650 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.51:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.18900eb890212639 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-01 07:27:19.411467833 +0000 UTC m=+238.134566088,LastTimestamp:2026-02-01 07:27:19.411467833 +0000 UTC m=+238.134566088,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 01 07:27:29 crc kubenswrapper[4650]: E0201 07:27:29.626120 4650 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="6.4s" Feb 01 07:27:31 crc kubenswrapper[4650]: I0201 07:27:31.965443 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:31 crc kubenswrapper[4650]: I0201 07:27:31.969729 4650 status_manager.go:851] "Failed to get status for pod" podUID="285b9d60-7e2e-4df8-811a-ddc59b103d1e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:31 crc kubenswrapper[4650]: I0201 07:27:31.970715 4650 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:31 crc kubenswrapper[4650]: I0201 07:27:31.971809 4650 status_manager.go:851] "Failed to get status for pod" podUID="285b9d60-7e2e-4df8-811a-ddc59b103d1e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:31 crc kubenswrapper[4650]: I0201 07:27:31.972336 4650 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:31 crc kubenswrapper[4650]: I0201 07:27:31.990681 4650 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f652440c-d98c-4d56-bdf1-4d7c835496d1" Feb 01 07:27:31 crc kubenswrapper[4650]: I0201 07:27:31.990722 4650 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f652440c-d98c-4d56-bdf1-4d7c835496d1" Feb 01 07:27:31 crc kubenswrapper[4650]: E0201 07:27:31.991235 4650 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:31 crc kubenswrapper[4650]: I0201 07:27:31.991965 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:32 crc kubenswrapper[4650]: I0201 07:27:32.998157 4650 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="e50bc9274e0da9fdd8a991344798c014e9f361317712d7e59cf57d1ed07213bb" exitCode=0 Feb 01 07:27:32 crc kubenswrapper[4650]: I0201 07:27:32.998337 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"e50bc9274e0da9fdd8a991344798c014e9f361317712d7e59cf57d1ed07213bb"} Feb 01 07:27:32 crc kubenswrapper[4650]: I0201 07:27:32.998690 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"4f40019334c2421bc7053e4d499d3e1b9a6c37fe1d8e71e7af0b8ffa6b27df79"} Feb 01 07:27:33 crc kubenswrapper[4650]: I0201 07:27:32.999122 4650 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f652440c-d98c-4d56-bdf1-4d7c835496d1" Feb 01 07:27:33 crc kubenswrapper[4650]: I0201 07:27:32.999143 4650 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f652440c-d98c-4d56-bdf1-4d7c835496d1" Feb 01 07:27:33 crc kubenswrapper[4650]: E0201 07:27:32.999906 4650 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:33 crc kubenswrapper[4650]: I0201 07:27:33.000437 4650 status_manager.go:851] "Failed to get status for pod" podUID="285b9d60-7e2e-4df8-811a-ddc59b103d1e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:33 crc kubenswrapper[4650]: I0201 07:27:33.000942 4650 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Feb 01 07:27:34 crc kubenswrapper[4650]: I0201 07:27:34.018345 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3a33326089b9b25d8d234c7ef8f50a1685f4815ded7df2c4d4a135135296e6f3"} Feb 01 07:27:34 crc kubenswrapper[4650]: I0201 07:27:34.025642 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Feb 01 07:27:34 crc kubenswrapper[4650]: I0201 07:27:34.025724 4650 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b" exitCode=1 Feb 01 07:27:34 crc kubenswrapper[4650]: I0201 07:27:34.025776 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b"} Feb 01 07:27:34 crc kubenswrapper[4650]: I0201 07:27:34.026827 4650 scope.go:117] "RemoveContainer" containerID="cbdba6eb7e50992426f1f1f90e9d9d8e5c5374dbea70ec6dc42605f0764b821b" Feb 01 07:27:34 crc kubenswrapper[4650]: I0201 07:27:34.556081 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:27:35 crc kubenswrapper[4650]: I0201 07:27:35.034325 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0735ba63d06c49c19450fda04e3a0563dc71ad884a5d65df4766a823d87de3ac"} Feb 01 07:27:35 crc kubenswrapper[4650]: I0201 07:27:35.038186 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Feb 01 07:27:35 crc kubenswrapper[4650]: I0201 07:27:35.038233 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"6efc2cbe158fc1763f6ba9cac1945fd8f24eeeb310d230f015db681f3cf63a51"} Feb 01 07:27:36 crc kubenswrapper[4650]: I0201 07:27:36.049178 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"a28b4a99f4ff1760a011cdb2f5d8cddc0cfea74bf583b2857c491e9598bead27"} Feb 01 07:27:36 crc kubenswrapper[4650]: I0201 07:27:36.049689 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"73aa3c576ffd30d01a57fe7f23dc53ce5bab134b4135000d038ec4c06d361d1a"} Feb 01 07:27:37 crc kubenswrapper[4650]: I0201 07:27:37.058806 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"a47f40b9ab3380b0412299d166eb485726b4be7b15823a1d13af8fa3f27570db"} Feb 01 07:27:37 crc kubenswrapper[4650]: I0201 07:27:37.058992 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:37 crc kubenswrapper[4650]: I0201 07:27:37.059145 4650 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f652440c-d98c-4d56-bdf1-4d7c835496d1" Feb 01 07:27:37 crc kubenswrapper[4650]: I0201 07:27:37.059179 4650 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f652440c-d98c-4d56-bdf1-4d7c835496d1" Feb 01 07:27:37 crc kubenswrapper[4650]: I0201 07:27:37.075129 4650 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:37 crc kubenswrapper[4650]: I0201 07:27:37.685422 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:27:37 crc kubenswrapper[4650]: I0201 07:27:37.689540 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:27:38 crc kubenswrapper[4650]: I0201 07:27:38.064724 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:27:38 crc kubenswrapper[4650]: I0201 07:27:38.065445 4650 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f652440c-d98c-4d56-bdf1-4d7c835496d1" Feb 01 07:27:38 crc kubenswrapper[4650]: I0201 07:27:38.066374 4650 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f652440c-d98c-4d56-bdf1-4d7c835496d1" Feb 01 07:27:41 crc kubenswrapper[4650]: I0201 07:27:41.979611 4650 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="ef5655f9-5e23-44c1-906e-d96e19f04348" Feb 01 07:27:49 crc kubenswrapper[4650]: I0201 07:27:49.928002 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 01 07:27:49 crc kubenswrapper[4650]: I0201 07:27:49.954858 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Feb 01 07:27:50 crc kubenswrapper[4650]: I0201 07:27:50.110617 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Feb 01 07:27:50 crc kubenswrapper[4650]: I0201 07:27:50.260263 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 01 07:27:50 crc kubenswrapper[4650]: I0201 07:27:50.717873 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Feb 01 07:27:50 crc kubenswrapper[4650]: I0201 07:27:50.972075 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Feb 01 07:27:51 crc kubenswrapper[4650]: I0201 07:27:51.028750 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Feb 01 07:27:51 crc kubenswrapper[4650]: I0201 07:27:51.173176 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Feb 01 07:27:51 crc kubenswrapper[4650]: I0201 07:27:51.757751 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Feb 01 07:27:52 crc kubenswrapper[4650]: I0201 07:27:52.065308 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Feb 01 07:27:52 crc kubenswrapper[4650]: I0201 07:27:52.081426 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Feb 01 07:27:52 crc kubenswrapper[4650]: I0201 07:27:52.215967 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Feb 01 07:27:52 crc kubenswrapper[4650]: I0201 07:27:52.258759 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Feb 01 07:27:52 crc kubenswrapper[4650]: I0201 07:27:52.283682 4650 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-cluster-samples-operator"/"samples-operator-tls" Feb 01 07:27:52 crc kubenswrapper[4650]: I0201 07:27:52.321488 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Feb 01 07:27:52 crc kubenswrapper[4650]: I0201 07:27:52.423866 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Feb 01 07:27:52 crc kubenswrapper[4650]: I0201 07:27:52.440805 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Feb 01 07:27:52 crc kubenswrapper[4650]: I0201 07:27:52.706511 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Feb 01 07:27:52 crc kubenswrapper[4650]: I0201 07:27:52.791301 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Feb 01 07:27:52 crc kubenswrapper[4650]: I0201 07:27:52.887450 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Feb 01 07:27:52 crc kubenswrapper[4650]: I0201 07:27:52.956618 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Feb 01 07:27:53 crc kubenswrapper[4650]: I0201 07:27:53.027655 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Feb 01 07:27:53 crc kubenswrapper[4650]: I0201 07:27:53.144417 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Feb 01 07:27:53 crc kubenswrapper[4650]: I0201 07:27:53.304672 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Feb 01 07:27:53 crc kubenswrapper[4650]: I0201 07:27:53.413402 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Feb 01 07:27:53 crc kubenswrapper[4650]: I0201 07:27:53.413795 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Feb 01 07:27:53 crc kubenswrapper[4650]: I0201 07:27:53.429713 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 01 07:27:53 crc kubenswrapper[4650]: I0201 07:27:53.478074 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Feb 01 07:27:53 crc kubenswrapper[4650]: I0201 07:27:53.518300 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Feb 01 07:27:53 crc kubenswrapper[4650]: I0201 07:27:53.647082 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 01 07:27:53 crc kubenswrapper[4650]: I0201 07:27:53.685395 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Feb 01 07:27:53 crc kubenswrapper[4650]: I0201 07:27:53.730939 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Feb 01 07:27:53 crc kubenswrapper[4650]: I0201 07:27:53.764641 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" 
Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.076478 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.156734 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.227908 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.299786 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.439089 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.453566 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.471988 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.543307 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.664684 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.723622 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.752973 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.761576 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.763552 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.804680 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Feb 01 07:27:54 crc kubenswrapper[4650]: I0201 07:27:54.859168 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.028517 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.147669 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.198356 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.265742 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.402725 4650 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.417793 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.421461 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.452264 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.615272 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.682112 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.715706 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.815215 4650 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.822719 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.885997 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.911562 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Feb 01 07:27:55 crc kubenswrapper[4650]: I0201 07:27:55.954272 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.057709 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.121404 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.158816 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.170584 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.176406 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.205836 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.238599 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.282111 4650 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.298662 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.348316 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.375192 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.417300 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.446460 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.547333 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.598824 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.688954 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.701922 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.740265 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.760785 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.761378 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.782297 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.935163 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Feb 01 07:27:56 crc kubenswrapper[4650]: I0201 07:27:56.990249 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.092371 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.286673 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 
07:27:57.299098 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.300577 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.316369 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.318735 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.350546 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.364073 4650 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.365601 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.365960 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=38.36593772 podStartE2EDuration="38.36593772s" podCreationTimestamp="2026-02-01 07:27:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:27:40.225666564 +0000 UTC m=+258.948764809" watchObservedRunningTime="2026-02-01 07:27:57.36593772 +0000 UTC m=+276.089035985" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.369714 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.369770 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.375330 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.399532 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.425353 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=20.425321821 podStartE2EDuration="20.425321821s" podCreationTimestamp="2026-02-01 07:27:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:27:57.403283519 +0000 UTC m=+276.126381764" watchObservedRunningTime="2026-02-01 07:27:57.425321821 +0000 UTC m=+276.148420066" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.525018 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.527393 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.575640 4650 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.610602 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.621977 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.704668 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.749292 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.765505 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.847266 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.887612 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.899161 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.930831 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Feb 01 07:27:57 crc kubenswrapper[4650]: I0201 07:27:57.960845 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.035966 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.080366 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.200589 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.208884 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.212498 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.220082 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.294184 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.309005 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 
07:27:58.392392 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.413729 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.420515 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.534579 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.573974 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.607731 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.898657 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.899657 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Feb 01 07:27:58 crc kubenswrapper[4650]: I0201 07:27:58.919345 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.031914 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.076758 4650 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.098841 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.140556 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.171731 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.282871 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.410867 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.417408 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.532390 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.600945 4650 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-network-node-identity"/"kube-root-ca.crt" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.692845 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.756910 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.790356 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.809718 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.826218 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.829797 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.860354 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.961980 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 01 07:27:59 crc kubenswrapper[4650]: I0201 07:27:59.989266 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.178718 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.274492 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.279979 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.294599 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.325710 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.343917 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.368083 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.407710 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.409166 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.422509 4650 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.509268 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.572050 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.848587 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.866712 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.896561 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Feb 01 07:28:00 crc kubenswrapper[4650]: I0201 07:28:00.901900 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.018506 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.069090 4650 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.076597 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.098094 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.128960 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.175450 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.332497 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.339162 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.401085 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.460591 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.550651 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.569132 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.610438 4650 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.615062 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.656867 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.731222 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.756714 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.822987 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.824744 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.829962 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.993044 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:28:01 crc kubenswrapper[4650]: I0201 07:28:01.993237 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.000245 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.004773 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.184062 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.230731 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.260894 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.261464 4650 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.380327 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.410520 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.432900 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.545557 4650 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.620093 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.718387 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.721545 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Feb 01 07:28:02 crc kubenswrapper[4650]: I0201 07:28:02.917907 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.020404 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.061701 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.061746 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.109954 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.180368 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.205606 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.222145 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.323296 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.361782 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.372125 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.498617 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.557743 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.558929 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.590514 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.716861 4650 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.718574 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.801310 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.825010 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.885566 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.962978 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Feb 01 07:28:03 crc kubenswrapper[4650]: I0201 07:28:03.964854 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Feb 01 07:28:04 crc kubenswrapper[4650]: I0201 07:28:04.028267 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Feb 01 07:28:04 crc kubenswrapper[4650]: I0201 07:28:04.051951 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Feb 01 07:28:04 crc kubenswrapper[4650]: I0201 07:28:04.133077 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Feb 01 07:28:04 crc kubenswrapper[4650]: I0201 07:28:04.239045 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Feb 01 07:28:04 crc kubenswrapper[4650]: I0201 07:28:04.380322 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Feb 01 07:28:04 crc kubenswrapper[4650]: I0201 07:28:04.639169 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Feb 01 07:28:04 crc kubenswrapper[4650]: I0201 07:28:04.660851 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Feb 01 07:28:04 crc kubenswrapper[4650]: I0201 07:28:04.718414 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Feb 01 07:28:04 crc kubenswrapper[4650]: I0201 07:28:04.871511 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 01 07:28:04 crc kubenswrapper[4650]: I0201 07:28:04.948381 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.111329 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.112128 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.132732 4650 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.254299 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.265809 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.412066 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.538179 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.626256 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.633333 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.671342 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.711624 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.855017 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.872229 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.955019 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Feb 01 07:28:05 crc kubenswrapper[4650]: I0201 07:28:05.978167 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Feb 01 07:28:06 crc kubenswrapper[4650]: I0201 07:28:06.111273 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Feb 01 07:28:06 crc kubenswrapper[4650]: I0201 07:28:06.209077 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Feb 01 07:28:06 crc kubenswrapper[4650]: I0201 07:28:06.232923 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Feb 01 07:28:06 crc kubenswrapper[4650]: I0201 07:28:06.464649 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Feb 01 07:28:06 crc kubenswrapper[4650]: I0201 07:28:06.768643 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Feb 01 07:28:06 crc kubenswrapper[4650]: I0201 07:28:06.890602 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Feb 01 07:28:07 crc kubenswrapper[4650]: I0201 07:28:07.373151 4650 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-apiserver"/"serving-cert" Feb 01 07:28:07 crc kubenswrapper[4650]: I0201 07:28:07.391872 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Feb 01 07:28:07 crc kubenswrapper[4650]: I0201 07:28:07.475209 4650 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Feb 01 07:28:07 crc kubenswrapper[4650]: I0201 07:28:07.602448 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Feb 01 07:28:07 crc kubenswrapper[4650]: I0201 07:28:07.670512 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Feb 01 07:28:08 crc kubenswrapper[4650]: I0201 07:28:08.243688 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Feb 01 07:28:08 crc kubenswrapper[4650]: I0201 07:28:08.471756 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.341463 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9"] Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.342928 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" podUID="02301c00-6a7a-457c-8664-32ebc116419e" containerName="controller-manager" containerID="cri-o://2792904a02179895e4da561912b8100a04e0446c1354b7856009b4d2c548926c" gracePeriod=30 Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.350150 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8"] Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.350628 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" podUID="6a28b7f9-5f4b-4fc4-a312-98fc165436ff" containerName="route-controller-manager" containerID="cri-o://2ca6bb3257b072c2f9a4e17bfbe75ebabac88d728c190f97c675f2f683eeddc8" gracePeriod=30 Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.786201 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.792899 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.817260 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-serving-cert\") pod \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.817342 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-config\") pod \"02301c00-6a7a-457c-8664-32ebc116419e\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.817373 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-proxy-ca-bundles\") pod \"02301c00-6a7a-457c-8664-32ebc116419e\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.817462 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6wdp\" (UniqueName: \"kubernetes.io/projected/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-kube-api-access-c6wdp\") pod \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.817484 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-client-ca\") pod \"02301c00-6a7a-457c-8664-32ebc116419e\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.817534 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02301c00-6a7a-457c-8664-32ebc116419e-serving-cert\") pod \"02301c00-6a7a-457c-8664-32ebc116419e\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.817559 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-client-ca\") pod \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.817582 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6fmxf\" (UniqueName: \"kubernetes.io/projected/02301c00-6a7a-457c-8664-32ebc116419e-kube-api-access-6fmxf\") pod \"02301c00-6a7a-457c-8664-32ebc116419e\" (UID: \"02301c00-6a7a-457c-8664-32ebc116419e\") " Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.817608 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-config\") pod \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\" (UID: \"6a28b7f9-5f4b-4fc4-a312-98fc165436ff\") " Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.818774 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-client-ca" (OuterVolumeSpecName: "client-ca") pod "02301c00-6a7a-457c-8664-32ebc116419e" 
(UID: "02301c00-6a7a-457c-8664-32ebc116419e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.819277 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "02301c00-6a7a-457c-8664-32ebc116419e" (UID: "02301c00-6a7a-457c-8664-32ebc116419e"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.819499 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-client-ca" (OuterVolumeSpecName: "client-ca") pod "6a28b7f9-5f4b-4fc4-a312-98fc165436ff" (UID: "6a28b7f9-5f4b-4fc4-a312-98fc165436ff"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.819537 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-config" (OuterVolumeSpecName: "config") pod "6a28b7f9-5f4b-4fc4-a312-98fc165436ff" (UID: "6a28b7f9-5f4b-4fc4-a312-98fc165436ff"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.820399 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-config" (OuterVolumeSpecName: "config") pod "02301c00-6a7a-457c-8664-32ebc116419e" (UID: "02301c00-6a7a-457c-8664-32ebc116419e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.829258 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02301c00-6a7a-457c-8664-32ebc116419e-kube-api-access-6fmxf" (OuterVolumeSpecName: "kube-api-access-6fmxf") pod "02301c00-6a7a-457c-8664-32ebc116419e" (UID: "02301c00-6a7a-457c-8664-32ebc116419e"). InnerVolumeSpecName "kube-api-access-6fmxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.834785 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6a28b7f9-5f4b-4fc4-a312-98fc165436ff" (UID: "6a28b7f9-5f4b-4fc4-a312-98fc165436ff"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.840891 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02301c00-6a7a-457c-8664-32ebc116419e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "02301c00-6a7a-457c-8664-32ebc116419e" (UID: "02301c00-6a7a-457c-8664-32ebc116419e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.844059 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-kube-api-access-c6wdp" (OuterVolumeSpecName: "kube-api-access-c6wdp") pod "6a28b7f9-5f4b-4fc4-a312-98fc165436ff" (UID: "6a28b7f9-5f4b-4fc4-a312-98fc165436ff"). 
InnerVolumeSpecName "kube-api-access-c6wdp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.919845 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.919924 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.919940 4650 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.919959 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6wdp\" (UniqueName: \"kubernetes.io/projected/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-kube-api-access-c6wdp\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.919973 4650 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02301c00-6a7a-457c-8664-32ebc116419e-client-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.919988 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02301c00-6a7a-457c-8664-32ebc116419e-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.920000 4650 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-client-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.920012 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6fmxf\" (UniqueName: \"kubernetes.io/projected/02301c00-6a7a-457c-8664-32ebc116419e-kube-api-access-6fmxf\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:09 crc kubenswrapper[4650]: I0201 07:28:09.920047 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a28b7f9-5f4b-4fc4-a312-98fc165436ff-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.285604 4650 generic.go:334] "Generic (PLEG): container finished" podID="02301c00-6a7a-457c-8664-32ebc116419e" containerID="2792904a02179895e4da561912b8100a04e0446c1354b7856009b4d2c548926c" exitCode=0 Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.285712 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.285757 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" event={"ID":"02301c00-6a7a-457c-8664-32ebc116419e","Type":"ContainerDied","Data":"2792904a02179895e4da561912b8100a04e0446c1354b7856009b4d2c548926c"} Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.285877 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9" event={"ID":"02301c00-6a7a-457c-8664-32ebc116419e","Type":"ContainerDied","Data":"d9d2d54c8e23a59f080567bfc371e6ef70eec0566974baf1254ce64a32ee2087"} Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.285993 4650 scope.go:117] "RemoveContainer" containerID="2792904a02179895e4da561912b8100a04e0446c1354b7856009b4d2c548926c" Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.290438 4650 generic.go:334] "Generic (PLEG): container finished" podID="6a28b7f9-5f4b-4fc4-a312-98fc165436ff" containerID="2ca6bb3257b072c2f9a4e17bfbe75ebabac88d728c190f97c675f2f683eeddc8" exitCode=0 Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.290639 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" event={"ID":"6a28b7f9-5f4b-4fc4-a312-98fc165436ff","Type":"ContainerDied","Data":"2ca6bb3257b072c2f9a4e17bfbe75ebabac88d728c190f97c675f2f683eeddc8"} Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.290735 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" event={"ID":"6a28b7f9-5f4b-4fc4-a312-98fc165436ff","Type":"ContainerDied","Data":"936a25b38daf7be0b67a3023ae25f8e5034c0492990d9ba7841e3781991cd285"} Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.290909 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8" Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.319865 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9"] Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.326382 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-58dd6d96c8-5d5z9"] Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.343459 4650 scope.go:117] "RemoveContainer" containerID="2792904a02179895e4da561912b8100a04e0446c1354b7856009b4d2c548926c" Feb 01 07:28:10 crc kubenswrapper[4650]: E0201 07:28:10.344542 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2792904a02179895e4da561912b8100a04e0446c1354b7856009b4d2c548926c\": container with ID starting with 2792904a02179895e4da561912b8100a04e0446c1354b7856009b4d2c548926c not found: ID does not exist" containerID="2792904a02179895e4da561912b8100a04e0446c1354b7856009b4d2c548926c" Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.344613 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2792904a02179895e4da561912b8100a04e0446c1354b7856009b4d2c548926c"} err="failed to get container status \"2792904a02179895e4da561912b8100a04e0446c1354b7856009b4d2c548926c\": rpc error: code = NotFound desc = could not find container \"2792904a02179895e4da561912b8100a04e0446c1354b7856009b4d2c548926c\": container with ID starting with 2792904a02179895e4da561912b8100a04e0446c1354b7856009b4d2c548926c not found: ID does not exist" Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.344678 4650 scope.go:117] "RemoveContainer" containerID="2ca6bb3257b072c2f9a4e17bfbe75ebabac88d728c190f97c675f2f683eeddc8" Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.360854 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8"] Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.367845 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cdd86b674-lrjm8"] Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.375606 4650 scope.go:117] "RemoveContainer" containerID="2ca6bb3257b072c2f9a4e17bfbe75ebabac88d728c190f97c675f2f683eeddc8" Feb 01 07:28:10 crc kubenswrapper[4650]: E0201 07:28:10.376526 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ca6bb3257b072c2f9a4e17bfbe75ebabac88d728c190f97c675f2f683eeddc8\": container with ID starting with 2ca6bb3257b072c2f9a4e17bfbe75ebabac88d728c190f97c675f2f683eeddc8 not found: ID does not exist" containerID="2ca6bb3257b072c2f9a4e17bfbe75ebabac88d728c190f97c675f2f683eeddc8" Feb 01 07:28:10 crc kubenswrapper[4650]: I0201 07:28:10.376574 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ca6bb3257b072c2f9a4e17bfbe75ebabac88d728c190f97c675f2f683eeddc8"} err="failed to get container status \"2ca6bb3257b072c2f9a4e17bfbe75ebabac88d728c190f97c675f2f683eeddc8\": rpc error: code = NotFound desc = could not find container \"2ca6bb3257b072c2f9a4e17bfbe75ebabac88d728c190f97c675f2f683eeddc8\": container with ID starting with 2ca6bb3257b072c2f9a4e17bfbe75ebabac88d728c190f97c675f2f683eeddc8 not found: ID does not exist" Feb 01 
07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.038321 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7"] Feb 01 07:28:11 crc kubenswrapper[4650]: E0201 07:28:11.038649 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a28b7f9-5f4b-4fc4-a312-98fc165436ff" containerName="route-controller-manager" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.038664 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a28b7f9-5f4b-4fc4-a312-98fc165436ff" containerName="route-controller-manager" Feb 01 07:28:11 crc kubenswrapper[4650]: E0201 07:28:11.038677 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02301c00-6a7a-457c-8664-32ebc116419e" containerName="controller-manager" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.038685 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="02301c00-6a7a-457c-8664-32ebc116419e" containerName="controller-manager" Feb 01 07:28:11 crc kubenswrapper[4650]: E0201 07:28:11.038698 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="285b9d60-7e2e-4df8-811a-ddc59b103d1e" containerName="installer" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.038704 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="285b9d60-7e2e-4df8-811a-ddc59b103d1e" containerName="installer" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.038816 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="285b9d60-7e2e-4df8-811a-ddc59b103d1e" containerName="installer" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.038833 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="02301c00-6a7a-457c-8664-32ebc116419e" containerName="controller-manager" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.038845 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a28b7f9-5f4b-4fc4-a312-98fc165436ff" containerName="route-controller-manager" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.039455 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.044196 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.044406 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.044682 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.044910 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.046265 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.049821 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.054693 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg"] Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.056509 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.060126 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.060645 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.060819 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.061282 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.061713 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.063634 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.070809 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7"] Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.073046 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-config\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.073100 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-client-ca\") pod \"route-controller-manager-6fd9c7dcbc-cqxj7\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.073180 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-proxy-ca-bundles\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.073212 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-client-ca\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.073317 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-serving-cert\") pod \"route-controller-manager-6fd9c7dcbc-cqxj7\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.073346 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrl5j\" (UniqueName: \"kubernetes.io/projected/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-kube-api-access-lrl5j\") pod \"route-controller-manager-6fd9c7dcbc-cqxj7\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.073564 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-config\") pod \"route-controller-manager-6fd9c7dcbc-cqxj7\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.073657 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91daafe2-b686-4074-84df-53cd65561985-serving-cert\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.073791 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scgf8\" (UniqueName: \"kubernetes.io/projected/91daafe2-b686-4074-84df-53cd65561985-kube-api-access-scgf8\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.081642 4650 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-controller-manager"/"openshift-global-ca" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.109230 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg"] Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.175161 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrl5j\" (UniqueName: \"kubernetes.io/projected/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-kube-api-access-lrl5j\") pod \"route-controller-manager-6fd9c7dcbc-cqxj7\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.175232 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-config\") pod \"route-controller-manager-6fd9c7dcbc-cqxj7\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.175258 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91daafe2-b686-4074-84df-53cd65561985-serving-cert\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.175302 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scgf8\" (UniqueName: \"kubernetes.io/projected/91daafe2-b686-4074-84df-53cd65561985-kube-api-access-scgf8\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.175330 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-config\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.175366 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-client-ca\") pod \"route-controller-manager-6fd9c7dcbc-cqxj7\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.175404 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-proxy-ca-bundles\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.175424 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-client-ca\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: 
\"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.175454 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-serving-cert\") pod \"route-controller-manager-6fd9c7dcbc-cqxj7\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.177435 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-client-ca\") pod \"route-controller-manager-6fd9c7dcbc-cqxj7\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.178082 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-client-ca\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.179438 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-proxy-ca-bundles\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.193292 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-serving-cert\") pod \"route-controller-manager-6fd9c7dcbc-cqxj7\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.193638 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-config\") pod \"route-controller-manager-6fd9c7dcbc-cqxj7\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.193745 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-config\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.194305 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91daafe2-b686-4074-84df-53cd65561985-serving-cert\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.200862 4650 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-scgf8\" (UniqueName: \"kubernetes.io/projected/91daafe2-b686-4074-84df-53cd65561985-kube-api-access-scgf8\") pod \"controller-manager-6c8b55bd9c-4jdfg\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.207469 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrl5j\" (UniqueName: \"kubernetes.io/projected/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-kube-api-access-lrl5j\") pod \"route-controller-manager-6fd9c7dcbc-cqxj7\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.389781 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.403908 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:11 crc kubenswrapper[4650]: W0201 07:28:11.620579 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f4f45a6_0ca2_4197_9f7b_c16e8bf6a664.slice/crio-cea021c813c280bc6877eeefbf37c76757368596403c922cede96de90fb69413 WatchSource:0}: Error finding container cea021c813c280bc6877eeefbf37c76757368596403c922cede96de90fb69413: Status 404 returned error can't find the container with id cea021c813c280bc6877eeefbf37c76757368596403c922cede96de90fb69413 Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.627212 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7"] Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.909810 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg"] Feb 01 07:28:11 crc kubenswrapper[4650]: W0201 07:28:11.918788 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91daafe2_b686_4074_84df_53cd65561985.slice/crio-53bba7c1d2d945c287b72cd2ba965fde499e006ae4179aadfd2d6539bc0d6105 WatchSource:0}: Error finding container 53bba7c1d2d945c287b72cd2ba965fde499e006ae4179aadfd2d6539bc0d6105: Status 404 returned error can't find the container with id 53bba7c1d2d945c287b72cd2ba965fde499e006ae4179aadfd2d6539bc0d6105 Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.979696 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02301c00-6a7a-457c-8664-32ebc116419e" path="/var/lib/kubelet/pods/02301c00-6a7a-457c-8664-32ebc116419e/volumes" Feb 01 07:28:11 crc kubenswrapper[4650]: I0201 07:28:11.980904 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a28b7f9-5f4b-4fc4-a312-98fc165436ff" path="/var/lib/kubelet/pods/6a28b7f9-5f4b-4fc4-a312-98fc165436ff/volumes" Feb 01 07:28:12 crc kubenswrapper[4650]: I0201 07:28:12.308229 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" event={"ID":"91daafe2-b686-4074-84df-53cd65561985","Type":"ContainerStarted","Data":"417298ab09a0a447dc39b03405cb8a50318bf4b64cd419aeff71843407d2712e"} Feb 01 07:28:12 crc kubenswrapper[4650]: I0201 
07:28:12.308715 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:12 crc kubenswrapper[4650]: I0201 07:28:12.308732 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" event={"ID":"91daafe2-b686-4074-84df-53cd65561985","Type":"ContainerStarted","Data":"53bba7c1d2d945c287b72cd2ba965fde499e006ae4179aadfd2d6539bc0d6105"} Feb 01 07:28:12 crc kubenswrapper[4650]: I0201 07:28:12.313255 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" event={"ID":"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664","Type":"ContainerStarted","Data":"e5e8bb4cefcb7eeb0d7d5fa2cd0aaa1686bdf1e42c7f0232748ea4014bdcc28b"} Feb 01 07:28:12 crc kubenswrapper[4650]: I0201 07:28:12.313970 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" event={"ID":"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664","Type":"ContainerStarted","Data":"cea021c813c280bc6877eeefbf37c76757368596403c922cede96de90fb69413"} Feb 01 07:28:12 crc kubenswrapper[4650]: I0201 07:28:12.314544 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:12 crc kubenswrapper[4650]: I0201 07:28:12.317672 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:12 crc kubenswrapper[4650]: I0201 07:28:12.319151 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:12 crc kubenswrapper[4650]: I0201 07:28:12.330892 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" podStartSLOduration=3.330873595 podStartE2EDuration="3.330873595s" podCreationTimestamp="2026-02-01 07:28:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:28:12.328928561 +0000 UTC m=+291.052026806" watchObservedRunningTime="2026-02-01 07:28:12.330873595 +0000 UTC m=+291.053971850" Feb 01 07:28:12 crc kubenswrapper[4650]: I0201 07:28:12.357490 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" podStartSLOduration=3.35746465 podStartE2EDuration="3.35746465s" podCreationTimestamp="2026-02-01 07:28:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:28:12.355768184 +0000 UTC m=+291.078866449" watchObservedRunningTime="2026-02-01 07:28:12.35746465 +0000 UTC m=+291.080562895" Feb 01 07:28:12 crc kubenswrapper[4650]: I0201 07:28:12.912864 4650 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 01 07:28:12 crc kubenswrapper[4650]: I0201 07:28:12.913431 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" 
containerID="cri-o://d974d6eec98320be9b5c743d6410901fd4ed3fc027f4e5312b65d6b4db55e82c" gracePeriod=5 Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.351956 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.352727 4650 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="d974d6eec98320be9b5c743d6410901fd4ed3fc027f4e5312b65d6b4db55e82c" exitCode=137 Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.514211 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.514331 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.710801 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.710875 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.710901 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.710931 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.710943 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.710972 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.710993 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.711138 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.711320 4650 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.711333 4650 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.711343 4650 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.711363 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.725587 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.812676 4650 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:18 crc kubenswrapper[4650]: I0201 07:28:18.812968 4650 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:19 crc kubenswrapper[4650]: I0201 07:28:19.361589 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Feb 01 07:28:19 crc kubenswrapper[4650]: I0201 07:28:19.361691 4650 scope.go:117] "RemoveContainer" containerID="d974d6eec98320be9b5c743d6410901fd4ed3fc027f4e5312b65d6b4db55e82c" Feb 01 07:28:19 crc kubenswrapper[4650]: I0201 07:28:19.361740 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 01 07:28:19 crc kubenswrapper[4650]: I0201 07:28:19.980173 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Feb 01 07:28:19 crc kubenswrapper[4650]: I0201 07:28:19.980820 4650 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Feb 01 07:28:19 crc kubenswrapper[4650]: I0201 07:28:19.993748 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 01 07:28:19 crc kubenswrapper[4650]: I0201 07:28:19.994005 4650 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="745b57e6-f765-43f3-a966-a30eb023f0b9" Feb 01 07:28:19 crc kubenswrapper[4650]: I0201 07:28:19.997514 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 01 07:28:19 crc kubenswrapper[4650]: I0201 07:28:19.997557 4650 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="745b57e6-f765-43f3-a966-a30eb023f0b9" Feb 01 07:28:21 crc kubenswrapper[4650]: I0201 07:28:21.718773 4650 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Feb 01 07:28:23 crc kubenswrapper[4650]: I0201 07:28:23.369588 4650 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-tprml container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Feb 01 07:28:23 crc kubenswrapper[4650]: I0201 07:28:23.370191 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" podUID="784dfbaa-4863-45d9-ac03-05d772fcb779" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Feb 01 07:28:23 crc kubenswrapper[4650]: I0201 07:28:23.370409 4650 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-tprml container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Feb 01 07:28:23 crc kubenswrapper[4650]: I0201 07:28:23.370448 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" podUID="784dfbaa-4863-45d9-ac03-05d772fcb779" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Feb 01 07:28:23 crc kubenswrapper[4650]: I0201 07:28:23.394207 4650 generic.go:334] "Generic (PLEG): container finished" podID="784dfbaa-4863-45d9-ac03-05d772fcb779" containerID="9e3cc4ea8853baec1d44b32d5c24b60272d48fb6f0519b0de9b7d567a9410883" exitCode=0 Feb 01 07:28:23 crc kubenswrapper[4650]: I0201 07:28:23.394289 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" 
event={"ID":"784dfbaa-4863-45d9-ac03-05d772fcb779","Type":"ContainerDied","Data":"9e3cc4ea8853baec1d44b32d5c24b60272d48fb6f0519b0de9b7d567a9410883"} Feb 01 07:28:23 crc kubenswrapper[4650]: I0201 07:28:23.395178 4650 scope.go:117] "RemoveContainer" containerID="9e3cc4ea8853baec1d44b32d5c24b60272d48fb6f0519b0de9b7d567a9410883" Feb 01 07:28:24 crc kubenswrapper[4650]: I0201 07:28:24.406610 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" event={"ID":"784dfbaa-4863-45d9-ac03-05d772fcb779","Type":"ContainerStarted","Data":"a0e882112994ab40a85f1293ddaf723f9f0cd2618539b03390efcb37f92451db"} Feb 01 07:28:24 crc kubenswrapper[4650]: I0201 07:28:24.407393 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:28:24 crc kubenswrapper[4650]: I0201 07:28:24.411853 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:28:29 crc kubenswrapper[4650]: I0201 07:28:29.335732 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg"] Feb 01 07:28:29 crc kubenswrapper[4650]: I0201 07:28:29.336506 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" podUID="91daafe2-b686-4074-84df-53cd65561985" containerName="controller-manager" containerID="cri-o://417298ab09a0a447dc39b03405cb8a50318bf4b64cd419aeff71843407d2712e" gracePeriod=30 Feb 01 07:28:29 crc kubenswrapper[4650]: I0201 07:28:29.360323 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7"] Feb 01 07:28:29 crc kubenswrapper[4650]: I0201 07:28:29.360708 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" podUID="1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664" containerName="route-controller-manager" containerID="cri-o://e5e8bb4cefcb7eeb0d7d5fa2cd0aaa1686bdf1e42c7f0232748ea4014bdcc28b" gracePeriod=30 Feb 01 07:28:29 crc kubenswrapper[4650]: I0201 07:28:29.981457 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.001404 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.097890 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrl5j\" (UniqueName: \"kubernetes.io/projected/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-kube-api-access-lrl5j\") pod \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.098174 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-client-ca\") pod \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.098213 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-serving-cert\") pod \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.098278 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-config\") pod \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\" (UID: \"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664\") " Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.099150 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-client-ca" (OuterVolumeSpecName: "client-ca") pod "1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664" (UID: "1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.099233 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-config" (OuterVolumeSpecName: "config") pod "1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664" (UID: "1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.103476 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-kube-api-access-lrl5j" (OuterVolumeSpecName: "kube-api-access-lrl5j") pod "1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664" (UID: "1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664"). InnerVolumeSpecName "kube-api-access-lrl5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.103481 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664" (UID: "1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.199898 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-config\") pod \"91daafe2-b686-4074-84df-53cd65561985\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.199949 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-client-ca\") pod \"91daafe2-b686-4074-84df-53cd65561985\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.200017 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scgf8\" (UniqueName: \"kubernetes.io/projected/91daafe2-b686-4074-84df-53cd65561985-kube-api-access-scgf8\") pod \"91daafe2-b686-4074-84df-53cd65561985\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.200075 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-proxy-ca-bundles\") pod \"91daafe2-b686-4074-84df-53cd65561985\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.200102 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91daafe2-b686-4074-84df-53cd65561985-serving-cert\") pod \"91daafe2-b686-4074-84df-53cd65561985\" (UID: \"91daafe2-b686-4074-84df-53cd65561985\") " Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.200457 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.200474 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.200484 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrl5j\" (UniqueName: \"kubernetes.io/projected/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-kube-api-access-lrl5j\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.201305 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "91daafe2-b686-4074-84df-53cd65561985" (UID: "91daafe2-b686-4074-84df-53cd65561985"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.201486 4650 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664-client-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.201662 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-client-ca" (OuterVolumeSpecName: "client-ca") pod "91daafe2-b686-4074-84df-53cd65561985" (UID: "91daafe2-b686-4074-84df-53cd65561985"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.201904 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-config" (OuterVolumeSpecName: "config") pod "91daafe2-b686-4074-84df-53cd65561985" (UID: "91daafe2-b686-4074-84df-53cd65561985"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.203780 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91daafe2-b686-4074-84df-53cd65561985-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "91daafe2-b686-4074-84df-53cd65561985" (UID: "91daafe2-b686-4074-84df-53cd65561985"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.204424 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91daafe2-b686-4074-84df-53cd65561985-kube-api-access-scgf8" (OuterVolumeSpecName: "kube-api-access-scgf8") pod "91daafe2-b686-4074-84df-53cd65561985" (UID: "91daafe2-b686-4074-84df-53cd65561985"). InnerVolumeSpecName "kube-api-access-scgf8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.303189 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.303225 4650 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-client-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.303237 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scgf8\" (UniqueName: \"kubernetes.io/projected/91daafe2-b686-4074-84df-53cd65561985-kube-api-access-scgf8\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.303249 4650 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/91daafe2-b686-4074-84df-53cd65561985-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.303258 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91daafe2-b686-4074-84df-53cd65561985-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.448857 4650 generic.go:334] "Generic (PLEG): container finished" podID="1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664" containerID="e5e8bb4cefcb7eeb0d7d5fa2cd0aaa1686bdf1e42c7f0232748ea4014bdcc28b" exitCode=0 Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.448936 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" event={"ID":"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664","Type":"ContainerDied","Data":"e5e8bb4cefcb7eeb0d7d5fa2cd0aaa1686bdf1e42c7f0232748ea4014bdcc28b"} Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.449005 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.449567 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7" event={"ID":"1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664","Type":"ContainerDied","Data":"cea021c813c280bc6877eeefbf37c76757368596403c922cede96de90fb69413"} Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.449636 4650 scope.go:117] "RemoveContainer" containerID="e5e8bb4cefcb7eeb0d7d5fa2cd0aaa1686bdf1e42c7f0232748ea4014bdcc28b" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.454886 4650 generic.go:334] "Generic (PLEG): container finished" podID="91daafe2-b686-4074-84df-53cd65561985" containerID="417298ab09a0a447dc39b03405cb8a50318bf4b64cd419aeff71843407d2712e" exitCode=0 Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.454962 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" event={"ID":"91daafe2-b686-4074-84df-53cd65561985","Type":"ContainerDied","Data":"417298ab09a0a447dc39b03405cb8a50318bf4b64cd419aeff71843407d2712e"} Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.455010 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" event={"ID":"91daafe2-b686-4074-84df-53cd65561985","Type":"ContainerDied","Data":"53bba7c1d2d945c287b72cd2ba965fde499e006ae4179aadfd2d6539bc0d6105"} Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.455147 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.482548 4650 scope.go:117] "RemoveContainer" containerID="e5e8bb4cefcb7eeb0d7d5fa2cd0aaa1686bdf1e42c7f0232748ea4014bdcc28b" Feb 01 07:28:30 crc kubenswrapper[4650]: E0201 07:28:30.483146 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5e8bb4cefcb7eeb0d7d5fa2cd0aaa1686bdf1e42c7f0232748ea4014bdcc28b\": container with ID starting with e5e8bb4cefcb7eeb0d7d5fa2cd0aaa1686bdf1e42c7f0232748ea4014bdcc28b not found: ID does not exist" containerID="e5e8bb4cefcb7eeb0d7d5fa2cd0aaa1686bdf1e42c7f0232748ea4014bdcc28b" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.483180 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5e8bb4cefcb7eeb0d7d5fa2cd0aaa1686bdf1e42c7f0232748ea4014bdcc28b"} err="failed to get container status \"e5e8bb4cefcb7eeb0d7d5fa2cd0aaa1686bdf1e42c7f0232748ea4014bdcc28b\": rpc error: code = NotFound desc = could not find container \"e5e8bb4cefcb7eeb0d7d5fa2cd0aaa1686bdf1e42c7f0232748ea4014bdcc28b\": container with ID starting with e5e8bb4cefcb7eeb0d7d5fa2cd0aaa1686bdf1e42c7f0232748ea4014bdcc28b not found: ID does not exist" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.483214 4650 scope.go:117] "RemoveContainer" containerID="417298ab09a0a447dc39b03405cb8a50318bf4b64cd419aeff71843407d2712e" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.495638 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7"] Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.499869 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-cqxj7"] Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.531815 4650 scope.go:117] "RemoveContainer" containerID="417298ab09a0a447dc39b03405cb8a50318bf4b64cd419aeff71843407d2712e" Feb 01 07:28:30 crc kubenswrapper[4650]: E0201 07:28:30.533432 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"417298ab09a0a447dc39b03405cb8a50318bf4b64cd419aeff71843407d2712e\": container with ID starting with 417298ab09a0a447dc39b03405cb8a50318bf4b64cd419aeff71843407d2712e not found: ID does not exist" containerID="417298ab09a0a447dc39b03405cb8a50318bf4b64cd419aeff71843407d2712e" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.533514 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"417298ab09a0a447dc39b03405cb8a50318bf4b64cd419aeff71843407d2712e"} err="failed to get container status \"417298ab09a0a447dc39b03405cb8a50318bf4b64cd419aeff71843407d2712e\": rpc error: code = NotFound desc = could not find container \"417298ab09a0a447dc39b03405cb8a50318bf4b64cd419aeff71843407d2712e\": container with ID starting with 417298ab09a0a447dc39b03405cb8a50318bf4b64cd419aeff71843407d2712e not found: ID does not exist" Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.536353 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg"] Feb 01 07:28:30 crc kubenswrapper[4650]: I0201 07:28:30.541147 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6c8b55bd9c-4jdfg"] Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.044328 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw"] Feb 01 07:28:31 crc kubenswrapper[4650]: E0201 07:28:31.044859 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664" containerName="route-controller-manager" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.044892 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664" containerName="route-controller-manager" Feb 01 07:28:31 crc kubenswrapper[4650]: E0201 07:28:31.044922 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91daafe2-b686-4074-84df-53cd65561985" containerName="controller-manager" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.044936 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="91daafe2-b686-4074-84df-53cd65561985" containerName="controller-manager" Feb 01 07:28:31 crc kubenswrapper[4650]: E0201 07:28:31.044954 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.044970 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.045200 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.045222 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="91daafe2-b686-4074-84df-53cd65561985" containerName="controller-manager" Feb 01 07:28:31 crc kubenswrapper[4650]: 
I0201 07:28:31.045249 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664" containerName="route-controller-manager" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.046072 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.049239 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-gq96d"] Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.050467 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.050532 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.050551 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.050911 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.051662 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.052367 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.055388 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.055614 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.055620 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.056130 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.059014 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.059060 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.065515 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.072936 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.075327 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw"] Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.089664 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-controller-manager/controller-manager-fb864b4d-gq96d"] Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.219430 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-config\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.219491 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7zbw\" (UniqueName: \"kubernetes.io/projected/5ce8923d-7830-4418-8466-4613a5fc0b7c-kube-api-access-p7zbw\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.219596 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ce8923d-7830-4418-8466-4613a5fc0b7c-serving-cert\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.219629 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-proxy-ca-bundles\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.219677 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-config\") pod \"route-controller-manager-5dcdbd9666-5njxw\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.219720 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-client-ca\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.219747 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-client-ca\") pod \"route-controller-manager-5dcdbd9666-5njxw\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.219789 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twch8\" (UniqueName: \"kubernetes.io/projected/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-kube-api-access-twch8\") pod \"route-controller-manager-5dcdbd9666-5njxw\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " 
pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.219885 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-serving-cert\") pod \"route-controller-manager-5dcdbd9666-5njxw\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.322000 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ce8923d-7830-4418-8466-4613a5fc0b7c-serving-cert\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.322527 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-proxy-ca-bundles\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.322656 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-config\") pod \"route-controller-manager-5dcdbd9666-5njxw\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.322810 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-client-ca\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.322960 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-client-ca\") pod \"route-controller-manager-5dcdbd9666-5njxw\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.323125 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twch8\" (UniqueName: \"kubernetes.io/projected/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-kube-api-access-twch8\") pod \"route-controller-manager-5dcdbd9666-5njxw\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.323272 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-serving-cert\") pod \"route-controller-manager-5dcdbd9666-5njxw\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc 
kubenswrapper[4650]: I0201 07:28:31.323399 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-config\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.323511 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7zbw\" (UniqueName: \"kubernetes.io/projected/5ce8923d-7830-4418-8466-4613a5fc0b7c-kube-api-access-p7zbw\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.324351 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-client-ca\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.324920 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-proxy-ca-bundles\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.325455 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-config\") pod \"route-controller-manager-5dcdbd9666-5njxw\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.326359 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-config\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.327769 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ce8923d-7830-4418-8466-4613a5fc0b7c-serving-cert\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.328279 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-client-ca\") pod \"route-controller-manager-5dcdbd9666-5njxw\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.330298 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-serving-cert\") pod \"route-controller-manager-5dcdbd9666-5njxw\" 
(UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.352823 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7zbw\" (UniqueName: \"kubernetes.io/projected/5ce8923d-7830-4418-8466-4613a5fc0b7c-kube-api-access-p7zbw\") pod \"controller-manager-fb864b4d-gq96d\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.367354 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twch8\" (UniqueName: \"kubernetes.io/projected/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-kube-api-access-twch8\") pod \"route-controller-manager-5dcdbd9666-5njxw\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.379715 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.665638 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.829437 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-gq96d"] Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.974136 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664" path="/var/lib/kubelet/pods/1f4f45a6-0ca2-4197-9f7b-c16e8bf6a664/volumes" Feb 01 07:28:31 crc kubenswrapper[4650]: I0201 07:28:31.975450 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91daafe2-b686-4074-84df-53cd65561985" path="/var/lib/kubelet/pods/91daafe2-b686-4074-84df-53cd65561985/volumes" Feb 01 07:28:32 crc kubenswrapper[4650]: I0201 07:28:32.187428 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw"] Feb 01 07:28:32 crc kubenswrapper[4650]: I0201 07:28:32.470858 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" event={"ID":"97ee854f-2b6d-4ce6-91f0-297efca1fc4b","Type":"ContainerStarted","Data":"18eacfac8f7e9ece2855a2de0dc7f813fd68f8d321db73038f2873cbc63c5370"} Feb 01 07:28:32 crc kubenswrapper[4650]: I0201 07:28:32.471738 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" event={"ID":"97ee854f-2b6d-4ce6-91f0-297efca1fc4b","Type":"ContainerStarted","Data":"a8be2b47513eea6bb5573260604448bc130611d456ec98a3ecda9a6615500a03"} Feb 01 07:28:32 crc kubenswrapper[4650]: I0201 07:28:32.471786 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:28:32 crc kubenswrapper[4650]: I0201 07:28:32.472696 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" 
event={"ID":"5ce8923d-7830-4418-8466-4613a5fc0b7c","Type":"ContainerStarted","Data":"a30d406a5a8257cf06413353dc28ef044e68cab0fb7c58470447fe748edcc576"} Feb 01 07:28:32 crc kubenswrapper[4650]: I0201 07:28:32.472740 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" event={"ID":"5ce8923d-7830-4418-8466-4613a5fc0b7c","Type":"ContainerStarted","Data":"fb94ff1633abb5486bc660f04beb1e839087ed4524c0769708be9eac63f3ac4d"} Feb 01 07:28:32 crc kubenswrapper[4650]: I0201 07:28:32.472925 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:32 crc kubenswrapper[4650]: I0201 07:28:32.473483 4650 patch_prober.go:28] interesting pod/route-controller-manager-5dcdbd9666-5njxw container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.65:8443/healthz\": dial tcp 10.217.0.65:8443: connect: connection refused" start-of-body= Feb 01 07:28:32 crc kubenswrapper[4650]: I0201 07:28:32.473526 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" podUID="97ee854f-2b6d-4ce6-91f0-297efca1fc4b" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.65:8443/healthz\": dial tcp 10.217.0.65:8443: connect: connection refused" Feb 01 07:28:32 crc kubenswrapper[4650]: I0201 07:28:32.479211 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:28:32 crc kubenswrapper[4650]: I0201 07:28:32.536439 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" podStartSLOduration=3.536414372 podStartE2EDuration="3.536414372s" podCreationTimestamp="2026-02-01 07:28:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:28:32.535868337 +0000 UTC m=+311.258966582" watchObservedRunningTime="2026-02-01 07:28:32.536414372 +0000 UTC m=+311.259512617" Feb 01 07:28:32 crc kubenswrapper[4650]: I0201 07:28:32.538165 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" podStartSLOduration=3.538156399 podStartE2EDuration="3.538156399s" podCreationTimestamp="2026-02-01 07:28:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:28:32.509432215 +0000 UTC m=+311.232530460" watchObservedRunningTime="2026-02-01 07:28:32.538156399 +0000 UTC m=+311.261254644" Feb 01 07:28:33 crc kubenswrapper[4650]: I0201 07:28:33.498308 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:29:07 crc kubenswrapper[4650]: I0201 07:29:07.161653 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:29:07 crc kubenswrapper[4650]: I0201 07:29:07.162539 4650 prober.go:107] 
"Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.058418 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-g62p7"] Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.059480 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.073011 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-g62p7"] Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.181910 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ecec1f32-5df8-4acf-914c-972dea4dc8d0-registry-certificates\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.182382 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.182412 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ecec1f32-5df8-4acf-914c-972dea4dc8d0-ca-trust-extracted\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.182433 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ecec1f32-5df8-4acf-914c-972dea4dc8d0-installation-pull-secrets\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.182460 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ecec1f32-5df8-4acf-914c-972dea4dc8d0-bound-sa-token\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.182478 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2kvw\" (UniqueName: \"kubernetes.io/projected/ecec1f32-5df8-4acf-914c-972dea4dc8d0-kube-api-access-q2kvw\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc 
kubenswrapper[4650]: I0201 07:29:24.182687 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ecec1f32-5df8-4acf-914c-972dea4dc8d0-registry-tls\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.182780 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ecec1f32-5df8-4acf-914c-972dea4dc8d0-trusted-ca\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.220900 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.283813 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ecec1f32-5df8-4acf-914c-972dea4dc8d0-ca-trust-extracted\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.283884 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ecec1f32-5df8-4acf-914c-972dea4dc8d0-installation-pull-secrets\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.283927 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ecec1f32-5df8-4acf-914c-972dea4dc8d0-bound-sa-token\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.283955 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2kvw\" (UniqueName: \"kubernetes.io/projected/ecec1f32-5df8-4acf-914c-972dea4dc8d0-kube-api-access-q2kvw\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.284003 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ecec1f32-5df8-4acf-914c-972dea4dc8d0-registry-tls\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.284057 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/ecec1f32-5df8-4acf-914c-972dea4dc8d0-trusted-ca\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.284081 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ecec1f32-5df8-4acf-914c-972dea4dc8d0-registry-certificates\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.285685 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ecec1f32-5df8-4acf-914c-972dea4dc8d0-ca-trust-extracted\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.286260 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ecec1f32-5df8-4acf-914c-972dea4dc8d0-trusted-ca\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.287056 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ecec1f32-5df8-4acf-914c-972dea4dc8d0-registry-certificates\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.292758 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ecec1f32-5df8-4acf-914c-972dea4dc8d0-registry-tls\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.295534 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ecec1f32-5df8-4acf-914c-972dea4dc8d0-installation-pull-secrets\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.304809 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2kvw\" (UniqueName: \"kubernetes.io/projected/ecec1f32-5df8-4acf-914c-972dea4dc8d0-kube-api-access-q2kvw\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.306965 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ecec1f32-5df8-4acf-914c-972dea4dc8d0-bound-sa-token\") pod \"image-registry-66df7c8f76-g62p7\" (UID: \"ecec1f32-5df8-4acf-914c-972dea4dc8d0\") " pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: 
I0201 07:29:24.412896 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:24 crc kubenswrapper[4650]: I0201 07:29:24.900349 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-g62p7"] Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.146081 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kwqn7"] Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.146422 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kwqn7" podUID="15308cf7-fed5-4bf2-84e9-ff7ea341303f" containerName="registry-server" containerID="cri-o://b4546924963669cfcf21b3ba2f91c0fa0075fc568e617e30679a7aa593f249c6" gracePeriod=30 Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.160597 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zlgfx"] Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.160884 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zlgfx" podUID="51dbc0bf-4be1-4dcc-b406-262067016c90" containerName="registry-server" containerID="cri-o://f8c4cef549eb915c29f1600869d8a8b2e80a9b50126880a81df393c51c72dc51" gracePeriod=30 Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.172894 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-tprml"] Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.173112 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" podUID="784dfbaa-4863-45d9-ac03-05d772fcb779" containerName="marketplace-operator" containerID="cri-o://a0e882112994ab40a85f1293ddaf723f9f0cd2618539b03390efcb37f92451db" gracePeriod=30 Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.194056 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sfl8l"] Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.194303 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-sfl8l" podUID="a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" containerName="registry-server" containerID="cri-o://0ea2c6d0b8c9a44d4da58af04d3fc73f1786fbd62ea417ea20c550041f6bd32b" gracePeriod=30 Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.196872 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5s2bx"] Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.197012 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5s2bx" podUID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" containerName="registry-server" containerID="cri-o://99829fed30d2f9425cf2487b6deef0716b3842b7058957b35701ec36e20722d1" gracePeriod=30 Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.208037 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vtbm9"] Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.208759 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.231389 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vtbm9"] Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.308142 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpcg2\" (UniqueName: \"kubernetes.io/projected/384b3fd0-ca99-47ce-9a89-c6bf2d695888-kube-api-access-jpcg2\") pod \"marketplace-operator-79b997595-vtbm9\" (UID: \"384b3fd0-ca99-47ce-9a89-c6bf2d695888\") " pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.310308 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/384b3fd0-ca99-47ce-9a89-c6bf2d695888-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vtbm9\" (UID: \"384b3fd0-ca99-47ce-9a89-c6bf2d695888\") " pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.310353 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/384b3fd0-ca99-47ce-9a89-c6bf2d695888-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vtbm9\" (UID: \"384b3fd0-ca99-47ce-9a89-c6bf2d695888\") " pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.411946 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpcg2\" (UniqueName: \"kubernetes.io/projected/384b3fd0-ca99-47ce-9a89-c6bf2d695888-kube-api-access-jpcg2\") pod \"marketplace-operator-79b997595-vtbm9\" (UID: \"384b3fd0-ca99-47ce-9a89-c6bf2d695888\") " pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.412017 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/384b3fd0-ca99-47ce-9a89-c6bf2d695888-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vtbm9\" (UID: \"384b3fd0-ca99-47ce-9a89-c6bf2d695888\") " pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.412063 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/384b3fd0-ca99-47ce-9a89-c6bf2d695888-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vtbm9\" (UID: \"384b3fd0-ca99-47ce-9a89-c6bf2d695888\") " pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.413850 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/384b3fd0-ca99-47ce-9a89-c6bf2d695888-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vtbm9\" (UID: \"384b3fd0-ca99-47ce-9a89-c6bf2d695888\") " pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.423964 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/384b3fd0-ca99-47ce-9a89-c6bf2d695888-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vtbm9\" (UID: \"384b3fd0-ca99-47ce-9a89-c6bf2d695888\") " pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.432349 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpcg2\" (UniqueName: \"kubernetes.io/projected/384b3fd0-ca99-47ce-9a89-c6bf2d695888-kube-api-access-jpcg2\") pod \"marketplace-operator-79b997595-vtbm9\" (UID: \"384b3fd0-ca99-47ce-9a89-c6bf2d695888\") " pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.517382 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.770780 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.816698 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15308cf7-fed5-4bf2-84e9-ff7ea341303f-utilities\") pod \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\" (UID: \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\") " Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.816805 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15308cf7-fed5-4bf2-84e9-ff7ea341303f-catalog-content\") pod \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\" (UID: \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\") " Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.816855 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mk8hf\" (UniqueName: \"kubernetes.io/projected/15308cf7-fed5-4bf2-84e9-ff7ea341303f-kube-api-access-mk8hf\") pod \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\" (UID: \"15308cf7-fed5-4bf2-84e9-ff7ea341303f\") " Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.818137 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15308cf7-fed5-4bf2-84e9-ff7ea341303f-utilities" (OuterVolumeSpecName: "utilities") pod "15308cf7-fed5-4bf2-84e9-ff7ea341303f" (UID: "15308cf7-fed5-4bf2-84e9-ff7ea341303f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.820617 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15308cf7-fed5-4bf2-84e9-ff7ea341303f-kube-api-access-mk8hf" (OuterVolumeSpecName: "kube-api-access-mk8hf") pod "15308cf7-fed5-4bf2-84e9-ff7ea341303f" (UID: "15308cf7-fed5-4bf2-84e9-ff7ea341303f"). InnerVolumeSpecName "kube-api-access-mk8hf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.843276 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.891268 4650 generic.go:334] "Generic (PLEG): container finished" podID="15308cf7-fed5-4bf2-84e9-ff7ea341303f" containerID="b4546924963669cfcf21b3ba2f91c0fa0075fc568e617e30679a7aa593f249c6" exitCode=0 Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.891330 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kwqn7" event={"ID":"15308cf7-fed5-4bf2-84e9-ff7ea341303f","Type":"ContainerDied","Data":"b4546924963669cfcf21b3ba2f91c0fa0075fc568e617e30679a7aa593f249c6"} Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.891361 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kwqn7" event={"ID":"15308cf7-fed5-4bf2-84e9-ff7ea341303f","Type":"ContainerDied","Data":"a6b708c7aa816d82a2c0815ed12b62c0c3954955a61e931da4fdbecefbff6996"} Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.891378 4650 scope.go:117] "RemoveContainer" containerID="b4546924963669cfcf21b3ba2f91c0fa0075fc568e617e30679a7aa593f249c6" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.891668 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15308cf7-fed5-4bf2-84e9-ff7ea341303f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "15308cf7-fed5-4bf2-84e9-ff7ea341303f" (UID: "15308cf7-fed5-4bf2-84e9-ff7ea341303f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.891717 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kwqn7" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.908508 4650 generic.go:334] "Generic (PLEG): container finished" podID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" containerID="99829fed30d2f9425cf2487b6deef0716b3842b7058957b35701ec36e20722d1" exitCode=0 Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.908591 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5s2bx" event={"ID":"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c","Type":"ContainerDied","Data":"99829fed30d2f9425cf2487b6deef0716b3842b7058957b35701ec36e20722d1"} Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.908835 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.918219 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8nw8\" (UniqueName: \"kubernetes.io/projected/784dfbaa-4863-45d9-ac03-05d772fcb779-kube-api-access-v8nw8\") pod \"784dfbaa-4863-45d9-ac03-05d772fcb779\" (UID: \"784dfbaa-4863-45d9-ac03-05d772fcb779\") " Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.918279 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/784dfbaa-4863-45d9-ac03-05d772fcb779-marketplace-trusted-ca\") pod \"784dfbaa-4863-45d9-ac03-05d772fcb779\" (UID: \"784dfbaa-4863-45d9-ac03-05d772fcb779\") " Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.918445 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/784dfbaa-4863-45d9-ac03-05d772fcb779-marketplace-operator-metrics\") pod \"784dfbaa-4863-45d9-ac03-05d772fcb779\" (UID: \"784dfbaa-4863-45d9-ac03-05d772fcb779\") " Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.918729 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15308cf7-fed5-4bf2-84e9-ff7ea341303f-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.918770 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15308cf7-fed5-4bf2-84e9-ff7ea341303f-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.918782 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mk8hf\" (UniqueName: \"kubernetes.io/projected/15308cf7-fed5-4bf2-84e9-ff7ea341303f-kube-api-access-mk8hf\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.919522 4650 generic.go:334] "Generic (PLEG): container finished" podID="784dfbaa-4863-45d9-ac03-05d772fcb779" containerID="a0e882112994ab40a85f1293ddaf723f9f0cd2618539b03390efcb37f92451db" exitCode=0 Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.919601 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" event={"ID":"784dfbaa-4863-45d9-ac03-05d772fcb779","Type":"ContainerDied","Data":"a0e882112994ab40a85f1293ddaf723f9f0cd2618539b03390efcb37f92451db"} Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.919637 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" event={"ID":"784dfbaa-4863-45d9-ac03-05d772fcb779","Type":"ContainerDied","Data":"ab168db22814f3650f614eb96145ae9893bf8a524a72dea59ff15e532706210a"} Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.919693 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-tprml" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.921250 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:29:25 crc kubenswrapper[4650]: I0201 07:29:25.921965 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/784dfbaa-4863-45d9-ac03-05d772fcb779-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "784dfbaa-4863-45d9-ac03-05d772fcb779" (UID: "784dfbaa-4863-45d9-ac03-05d772fcb779"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:25.931548 4650 generic.go:334] "Generic (PLEG): container finished" podID="51dbc0bf-4be1-4dcc-b406-262067016c90" containerID="f8c4cef549eb915c29f1600869d8a8b2e80a9b50126880a81df393c51c72dc51" exitCode=0 Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:25.931633 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zlgfx" event={"ID":"51dbc0bf-4be1-4dcc-b406-262067016c90","Type":"ContainerDied","Data":"f8c4cef549eb915c29f1600869d8a8b2e80a9b50126880a81df393c51c72dc51"} Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:25.931748 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zlgfx" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:25.934557 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/784dfbaa-4863-45d9-ac03-05d772fcb779-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "784dfbaa-4863-45d9-ac03-05d772fcb779" (UID: "784dfbaa-4863-45d9-ac03-05d772fcb779"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:25.937123 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/784dfbaa-4863-45d9-ac03-05d772fcb779-kube-api-access-v8nw8" (OuterVolumeSpecName: "kube-api-access-v8nw8") pod "784dfbaa-4863-45d9-ac03-05d772fcb779" (UID: "784dfbaa-4863-45d9-ac03-05d772fcb779"). InnerVolumeSpecName "kube-api-access-v8nw8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:25.943918 4650 generic.go:334] "Generic (PLEG): container finished" podID="a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" containerID="0ea2c6d0b8c9a44d4da58af04d3fc73f1786fbd62ea417ea20c550041f6bd32b" exitCode=0 Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:25.943982 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sfl8l" event={"ID":"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0","Type":"ContainerDied","Data":"0ea2c6d0b8c9a44d4da58af04d3fc73f1786fbd62ea417ea20c550041f6bd32b"} Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:25.945762 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" event={"ID":"ecec1f32-5df8-4acf-914c-972dea4dc8d0","Type":"ContainerStarted","Data":"65212d55822ed47318fb4a97dcafc50174dbae87e84f67af35f2ac8a814582d4"} Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:25.945791 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" event={"ID":"ecec1f32-5df8-4acf-914c-972dea4dc8d0","Type":"ContainerStarted","Data":"153ab78728fe4fd941455ee4cc268e5f3df78c44f484baf989063a40b5607100"} Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:25.946524 4650 scope.go:117] "RemoveContainer" containerID="ca866d094eba9c01adb6d57a410c0c726b23ce9983e54c49212cf516b4d23c79" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:25.946735 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:25.993599 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.008784 4650 scope.go:117] "RemoveContainer" containerID="e94523ad0dab57b917fbcfb1c4f03d9333732ed0e7cf4986afc9e7d98ee418d4" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.011213 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kwqn7"] Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.011239 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kwqn7"] Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.014636 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" podStartSLOduration=2.014620264 podStartE2EDuration="2.014620264s" podCreationTimestamp="2026-02-01 07:29:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:29:26.00962911 +0000 UTC m=+364.732727365" watchObservedRunningTime="2026-02-01 07:29:26.014620264 +0000 UTC m=+364.737718509" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.019436 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kn9pd\" (UniqueName: \"kubernetes.io/projected/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-kube-api-access-kn9pd\") pod \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\" (UID: \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\") " Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.019572 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-catalog-content\") pod \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\" (UID: \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\") " Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.019639 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-utilities\") pod \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\" (UID: \"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c\") " Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.019719 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-utilities\") pod \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\" (UID: \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\") " Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.019773 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51dbc0bf-4be1-4dcc-b406-262067016c90-utilities\") pod \"51dbc0bf-4be1-4dcc-b406-262067016c90\" (UID: \"51dbc0bf-4be1-4dcc-b406-262067016c90\") " Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.019808 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-catalog-content\") pod \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\" (UID: \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\") " Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.019853 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4mldh\" (UniqueName: 
\"kubernetes.io/projected/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-kube-api-access-4mldh\") pod \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\" (UID: \"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0\") " Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.019904 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51dbc0bf-4be1-4dcc-b406-262067016c90-catalog-content\") pod \"51dbc0bf-4be1-4dcc-b406-262067016c90\" (UID: \"51dbc0bf-4be1-4dcc-b406-262067016c90\") " Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.019949 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t99mk\" (UniqueName: \"kubernetes.io/projected/51dbc0bf-4be1-4dcc-b406-262067016c90-kube-api-access-t99mk\") pod \"51dbc0bf-4be1-4dcc-b406-262067016c90\" (UID: \"51dbc0bf-4be1-4dcc-b406-262067016c90\") " Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.020352 4650 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/784dfbaa-4863-45d9-ac03-05d772fcb779-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.020368 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8nw8\" (UniqueName: \"kubernetes.io/projected/784dfbaa-4863-45d9-ac03-05d772fcb779-kube-api-access-v8nw8\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.020378 4650 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/784dfbaa-4863-45d9-ac03-05d772fcb779-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.020778 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-utilities" (OuterVolumeSpecName: "utilities") pod "81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" (UID: "81e5cda9-5051-4b4d-a3fb-3acb0b780e4c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.021774 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-utilities" (OuterVolumeSpecName: "utilities") pod "a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" (UID: "a20ec82b-4c7b-41da-9766-3a6d3dbde1c0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.022784 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51dbc0bf-4be1-4dcc-b406-262067016c90-utilities" (OuterVolumeSpecName: "utilities") pod "51dbc0bf-4be1-4dcc-b406-262067016c90" (UID: "51dbc0bf-4be1-4dcc-b406-262067016c90"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.026371 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51dbc0bf-4be1-4dcc-b406-262067016c90-kube-api-access-t99mk" (OuterVolumeSpecName: "kube-api-access-t99mk") pod "51dbc0bf-4be1-4dcc-b406-262067016c90" (UID: "51dbc0bf-4be1-4dcc-b406-262067016c90"). InnerVolumeSpecName "kube-api-access-t99mk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.040288 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-kube-api-access-kn9pd" (OuterVolumeSpecName: "kube-api-access-kn9pd") pod "81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" (UID: "81e5cda9-5051-4b4d-a3fb-3acb0b780e4c"). InnerVolumeSpecName "kube-api-access-kn9pd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.049627 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-kube-api-access-4mldh" (OuterVolumeSpecName: "kube-api-access-4mldh") pod "a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" (UID: "a20ec82b-4c7b-41da-9766-3a6d3dbde1c0"). InnerVolumeSpecName "kube-api-access-4mldh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.061199 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" (UID: "a20ec82b-4c7b-41da-9766-3a6d3dbde1c0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.088567 4650 scope.go:117] "RemoveContainer" containerID="b4546924963669cfcf21b3ba2f91c0fa0075fc568e617e30679a7aa593f249c6" Feb 01 07:29:26 crc kubenswrapper[4650]: E0201 07:29:26.092369 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4546924963669cfcf21b3ba2f91c0fa0075fc568e617e30679a7aa593f249c6\": container with ID starting with b4546924963669cfcf21b3ba2f91c0fa0075fc568e617e30679a7aa593f249c6 not found: ID does not exist" containerID="b4546924963669cfcf21b3ba2f91c0fa0075fc568e617e30679a7aa593f249c6" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.092410 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4546924963669cfcf21b3ba2f91c0fa0075fc568e617e30679a7aa593f249c6"} err="failed to get container status \"b4546924963669cfcf21b3ba2f91c0fa0075fc568e617e30679a7aa593f249c6\": rpc error: code = NotFound desc = could not find container \"b4546924963669cfcf21b3ba2f91c0fa0075fc568e617e30679a7aa593f249c6\": container with ID starting with b4546924963669cfcf21b3ba2f91c0fa0075fc568e617e30679a7aa593f249c6 not found: ID does not exist" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.092438 4650 scope.go:117] "RemoveContainer" containerID="ca866d094eba9c01adb6d57a410c0c726b23ce9983e54c49212cf516b4d23c79" Feb 01 07:29:26 crc kubenswrapper[4650]: E0201 07:29:26.092695 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca866d094eba9c01adb6d57a410c0c726b23ce9983e54c49212cf516b4d23c79\": container with ID starting with ca866d094eba9c01adb6d57a410c0c726b23ce9983e54c49212cf516b4d23c79 not found: ID does not exist" containerID="ca866d094eba9c01adb6d57a410c0c726b23ce9983e54c49212cf516b4d23c79" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.092711 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca866d094eba9c01adb6d57a410c0c726b23ce9983e54c49212cf516b4d23c79"} err="failed to 
get container status \"ca866d094eba9c01adb6d57a410c0c726b23ce9983e54c49212cf516b4d23c79\": rpc error: code = NotFound desc = could not find container \"ca866d094eba9c01adb6d57a410c0c726b23ce9983e54c49212cf516b4d23c79\": container with ID starting with ca866d094eba9c01adb6d57a410c0c726b23ce9983e54c49212cf516b4d23c79 not found: ID does not exist" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.092725 4650 scope.go:117] "RemoveContainer" containerID="e94523ad0dab57b917fbcfb1c4f03d9333732ed0e7cf4986afc9e7d98ee418d4" Feb 01 07:29:26 crc kubenswrapper[4650]: E0201 07:29:26.092929 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e94523ad0dab57b917fbcfb1c4f03d9333732ed0e7cf4986afc9e7d98ee418d4\": container with ID starting with e94523ad0dab57b917fbcfb1c4f03d9333732ed0e7cf4986afc9e7d98ee418d4 not found: ID does not exist" containerID="e94523ad0dab57b917fbcfb1c4f03d9333732ed0e7cf4986afc9e7d98ee418d4" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.092945 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e94523ad0dab57b917fbcfb1c4f03d9333732ed0e7cf4986afc9e7d98ee418d4"} err="failed to get container status \"e94523ad0dab57b917fbcfb1c4f03d9333732ed0e7cf4986afc9e7d98ee418d4\": rpc error: code = NotFound desc = could not find container \"e94523ad0dab57b917fbcfb1c4f03d9333732ed0e7cf4986afc9e7d98ee418d4\": container with ID starting with e94523ad0dab57b917fbcfb1c4f03d9333732ed0e7cf4986afc9e7d98ee418d4 not found: ID does not exist" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.092959 4650 scope.go:117] "RemoveContainer" containerID="a0e882112994ab40a85f1293ddaf723f9f0cd2618539b03390efcb37f92451db" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.108268 4650 scope.go:117] "RemoveContainer" containerID="9e3cc4ea8853baec1d44b32d5c24b60272d48fb6f0519b0de9b7d567a9410883" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.109060 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51dbc0bf-4be1-4dcc-b406-262067016c90-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "51dbc0bf-4be1-4dcc-b406-262067016c90" (UID: "51dbc0bf-4be1-4dcc-b406-262067016c90"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.118602 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vtbm9"] Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.122451 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.122483 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4mldh\" (UniqueName: \"kubernetes.io/projected/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-kube-api-access-4mldh\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.122499 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51dbc0bf-4be1-4dcc-b406-262067016c90-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.122512 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t99mk\" (UniqueName: \"kubernetes.io/projected/51dbc0bf-4be1-4dcc-b406-262067016c90-kube-api-access-t99mk\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.122524 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kn9pd\" (UniqueName: \"kubernetes.io/projected/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-kube-api-access-kn9pd\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.122537 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.122551 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.122562 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51dbc0bf-4be1-4dcc-b406-262067016c90-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.148130 4650 scope.go:117] "RemoveContainer" containerID="a0e882112994ab40a85f1293ddaf723f9f0cd2618539b03390efcb37f92451db" Feb 01 07:29:26 crc kubenswrapper[4650]: E0201 07:29:26.150019 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0e882112994ab40a85f1293ddaf723f9f0cd2618539b03390efcb37f92451db\": container with ID starting with a0e882112994ab40a85f1293ddaf723f9f0cd2618539b03390efcb37f92451db not found: ID does not exist" containerID="a0e882112994ab40a85f1293ddaf723f9f0cd2618539b03390efcb37f92451db" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.150098 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0e882112994ab40a85f1293ddaf723f9f0cd2618539b03390efcb37f92451db"} err="failed to get container status \"a0e882112994ab40a85f1293ddaf723f9f0cd2618539b03390efcb37f92451db\": rpc error: code = NotFound desc = could not find container \"a0e882112994ab40a85f1293ddaf723f9f0cd2618539b03390efcb37f92451db\": container with ID starting with 
a0e882112994ab40a85f1293ddaf723f9f0cd2618539b03390efcb37f92451db not found: ID does not exist" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.150134 4650 scope.go:117] "RemoveContainer" containerID="9e3cc4ea8853baec1d44b32d5c24b60272d48fb6f0519b0de9b7d567a9410883" Feb 01 07:29:26 crc kubenswrapper[4650]: E0201 07:29:26.150959 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e3cc4ea8853baec1d44b32d5c24b60272d48fb6f0519b0de9b7d567a9410883\": container with ID starting with 9e3cc4ea8853baec1d44b32d5c24b60272d48fb6f0519b0de9b7d567a9410883 not found: ID does not exist" containerID="9e3cc4ea8853baec1d44b32d5c24b60272d48fb6f0519b0de9b7d567a9410883" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.151000 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e3cc4ea8853baec1d44b32d5c24b60272d48fb6f0519b0de9b7d567a9410883"} err="failed to get container status \"9e3cc4ea8853baec1d44b32d5c24b60272d48fb6f0519b0de9b7d567a9410883\": rpc error: code = NotFound desc = could not find container \"9e3cc4ea8853baec1d44b32d5c24b60272d48fb6f0519b0de9b7d567a9410883\": container with ID starting with 9e3cc4ea8853baec1d44b32d5c24b60272d48fb6f0519b0de9b7d567a9410883 not found: ID does not exist" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.151050 4650 scope.go:117] "RemoveContainer" containerID="f8c4cef549eb915c29f1600869d8a8b2e80a9b50126880a81df393c51c72dc51" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.174296 4650 scope.go:117] "RemoveContainer" containerID="59e92e06ea68ad63db4c12ada7ac7f40b134b96eb5ecbfac6d0780314bb0c483" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.197125 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" (UID: "81e5cda9-5051-4b4d-a3fb-3acb0b780e4c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.224905 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.353815 4650 scope.go:117] "RemoveContainer" containerID="a9757aefe072de324a8de9acbc90470352fdb77d9542aa3ac937fa354573a0db" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.461250 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zlgfx"] Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.470190 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zlgfx"] Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.474412 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-tprml"] Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.478067 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-tprml"] Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.957773 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5s2bx" event={"ID":"81e5cda9-5051-4b4d-a3fb-3acb0b780e4c","Type":"ContainerDied","Data":"27fbc063cfbfd516a37bb9616f98a014f121f3a1c4097b0317de40ed34ae8b12"} Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.957825 4650 scope.go:117] "RemoveContainer" containerID="99829fed30d2f9425cf2487b6deef0716b3842b7058957b35701ec36e20722d1" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.957962 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5s2bx" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.975042 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sfl8l" event={"ID":"a20ec82b-4c7b-41da-9766-3a6d3dbde1c0","Type":"ContainerDied","Data":"2f7433b9e9c5d4e6fab1201421b6f58b9b31b5a412939346675cc0c9f99a0e00"} Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.975218 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sfl8l" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.985164 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" event={"ID":"384b3fd0-ca99-47ce-9a89-c6bf2d695888","Type":"ContainerStarted","Data":"20519c4937629e17344be586dc252f25787dbda03b45c8abd809e944d005a097"} Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.985234 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.985246 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" event={"ID":"384b3fd0-ca99-47ce-9a89-c6bf2d695888","Type":"ContainerStarted","Data":"b3f5db47f8726d8c922bb42c2423938a1eb42a0ce320a09385b878206c7995ac"} Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.989157 4650 scope.go:117] "RemoveContainer" containerID="601574de42827a76aa92ff0c22aa81229b9326b6a09b90ed20e8a5cc04bf9800" Feb 01 07:29:26 crc kubenswrapper[4650]: I0201 07:29:26.996008 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.002217 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5s2bx"] Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.003264 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5s2bx"] Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.026948 4650 scope.go:117] "RemoveContainer" containerID="f546ebacaeb8d69835a1842ad39516997ff51a2061d09a38823b59deb74bf6ad" Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.028503 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-vtbm9" podStartSLOduration=2.028477844 podStartE2EDuration="2.028477844s" podCreationTimestamp="2026-02-01 07:29:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:29:27.023894439 +0000 UTC m=+365.746992684" watchObservedRunningTime="2026-02-01 07:29:27.028477844 +0000 UTC m=+365.751576079" Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.057582 4650 scope.go:117] "RemoveContainer" containerID="0ea2c6d0b8c9a44d4da58af04d3fc73f1786fbd62ea417ea20c550041f6bd32b" Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.068829 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sfl8l"] Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.070810 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sfl8l"] Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.072941 4650 scope.go:117] "RemoveContainer" containerID="3407debaed3b9e454ab05df9f9cc1de75ba19a0080417e33b4c195e8edbdbc35" Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.106278 4650 scope.go:117] "RemoveContainer" containerID="aca37bb8e6683e77dfcd256a724c3ed263a590809d2f7749e9cbf2fb4f7f4170" Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.972815 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15308cf7-fed5-4bf2-84e9-ff7ea341303f" 
path="/var/lib/kubelet/pods/15308cf7-fed5-4bf2-84e9-ff7ea341303f/volumes" Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.973797 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51dbc0bf-4be1-4dcc-b406-262067016c90" path="/var/lib/kubelet/pods/51dbc0bf-4be1-4dcc-b406-262067016c90/volumes" Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.974977 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="784dfbaa-4863-45d9-ac03-05d772fcb779" path="/var/lib/kubelet/pods/784dfbaa-4863-45d9-ac03-05d772fcb779/volumes" Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.975962 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" path="/var/lib/kubelet/pods/81e5cda9-5051-4b4d-a3fb-3acb0b780e4c/volumes" Feb 01 07:29:27 crc kubenswrapper[4650]: I0201 07:29:27.976820 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" path="/var/lib/kubelet/pods/a20ec82b-4c7b-41da-9766-3a6d3dbde1c0/volumes" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.294741 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-gq96d"] Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.294919 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" podUID="5ce8923d-7830-4418-8466-4613a5fc0b7c" containerName="controller-manager" containerID="cri-o://a30d406a5a8257cf06413353dc28ef044e68cab0fb7c58470447fe748edcc576" gracePeriod=30 Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.356969 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wcj2n"] Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357165 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15308cf7-fed5-4bf2-84e9-ff7ea341303f" containerName="registry-server" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357177 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="15308cf7-fed5-4bf2-84e9-ff7ea341303f" containerName="registry-server" Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357188 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" containerName="extract-utilities" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357194 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" containerName="extract-utilities" Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357205 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51dbc0bf-4be1-4dcc-b406-262067016c90" containerName="extract-content" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357211 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="51dbc0bf-4be1-4dcc-b406-262067016c90" containerName="extract-content" Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357220 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15308cf7-fed5-4bf2-84e9-ff7ea341303f" containerName="extract-content" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357225 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="15308cf7-fed5-4bf2-84e9-ff7ea341303f" containerName="extract-content" Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357233 4650 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" containerName="extract-content" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357240 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" containerName="extract-content" Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357248 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" containerName="registry-server" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357253 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" containerName="registry-server" Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357263 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51dbc0bf-4be1-4dcc-b406-262067016c90" containerName="registry-server" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357268 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="51dbc0bf-4be1-4dcc-b406-262067016c90" containerName="registry-server" Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357277 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="784dfbaa-4863-45d9-ac03-05d772fcb779" containerName="marketplace-operator" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357283 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="784dfbaa-4863-45d9-ac03-05d772fcb779" containerName="marketplace-operator" Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357290 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15308cf7-fed5-4bf2-84e9-ff7ea341303f" containerName="extract-utilities" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357295 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="15308cf7-fed5-4bf2-84e9-ff7ea341303f" containerName="extract-utilities" Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357307 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" containerName="extract-utilities" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357312 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" containerName="extract-utilities" Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357319 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51dbc0bf-4be1-4dcc-b406-262067016c90" containerName="extract-utilities" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357325 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="51dbc0bf-4be1-4dcc-b406-262067016c90" containerName="extract-utilities" Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357333 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" containerName="extract-content" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357338 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" containerName="extract-content" Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357345 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" containerName="registry-server" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357354 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" containerName="registry-server" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357431 4650 
memory_manager.go:354] "RemoveStaleState removing state" podUID="81e5cda9-5051-4b4d-a3fb-3acb0b780e4c" containerName="registry-server" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357443 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="784dfbaa-4863-45d9-ac03-05d772fcb779" containerName="marketplace-operator" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357452 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="15308cf7-fed5-4bf2-84e9-ff7ea341303f" containerName="registry-server" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357460 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="a20ec82b-4c7b-41da-9766-3a6d3dbde1c0" containerName="registry-server" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357469 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="51dbc0bf-4be1-4dcc-b406-262067016c90" containerName="registry-server" Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.357562 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="784dfbaa-4863-45d9-ac03-05d772fcb779" containerName="marketplace-operator" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357569 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="784dfbaa-4863-45d9-ac03-05d772fcb779" containerName="marketplace-operator" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.357657 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="784dfbaa-4863-45d9-ac03-05d772fcb779" containerName="marketplace-operator" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.358252 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.363430 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.371395 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wcj2n"] Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.464844 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnsrp\" (UniqueName: \"kubernetes.io/projected/ebbbb2f8-c26c-40e3-a357-0d43dee59901-kube-api-access-dnsrp\") pod \"redhat-marketplace-wcj2n\" (UID: \"ebbbb2f8-c26c-40e3-a357-0d43dee59901\") " pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.470345 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebbbb2f8-c26c-40e3-a357-0d43dee59901-utilities\") pod \"redhat-marketplace-wcj2n\" (UID: \"ebbbb2f8-c26c-40e3-a357-0d43dee59901\") " pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.470599 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebbbb2f8-c26c-40e3-a357-0d43dee59901-catalog-content\") pod \"redhat-marketplace-wcj2n\" (UID: \"ebbbb2f8-c26c-40e3-a357-0d43dee59901\") " pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.572114 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnsrp\" (UniqueName: 
\"kubernetes.io/projected/ebbbb2f8-c26c-40e3-a357-0d43dee59901-kube-api-access-dnsrp\") pod \"redhat-marketplace-wcj2n\" (UID: \"ebbbb2f8-c26c-40e3-a357-0d43dee59901\") " pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.572489 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebbbb2f8-c26c-40e3-a357-0d43dee59901-utilities\") pod \"redhat-marketplace-wcj2n\" (UID: \"ebbbb2f8-c26c-40e3-a357-0d43dee59901\") " pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.572549 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebbbb2f8-c26c-40e3-a357-0d43dee59901-catalog-content\") pod \"redhat-marketplace-wcj2n\" (UID: \"ebbbb2f8-c26c-40e3-a357-0d43dee59901\") " pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.573326 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebbbb2f8-c26c-40e3-a357-0d43dee59901-utilities\") pod \"redhat-marketplace-wcj2n\" (UID: \"ebbbb2f8-c26c-40e3-a357-0d43dee59901\") " pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.573366 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebbbb2f8-c26c-40e3-a357-0d43dee59901-catalog-content\") pod \"redhat-marketplace-wcj2n\" (UID: \"ebbbb2f8-c26c-40e3-a357-0d43dee59901\") " pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.597528 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnsrp\" (UniqueName: \"kubernetes.io/projected/ebbbb2f8-c26c-40e3-a357-0d43dee59901-kube-api-access-dnsrp\") pod \"redhat-marketplace-wcj2n\" (UID: \"ebbbb2f8-c26c-40e3-a357-0d43dee59901\") " pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.715228 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.782714 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.876336 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-client-ca\") pod \"5ce8923d-7830-4418-8466-4613a5fc0b7c\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.876787 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p7zbw\" (UniqueName: \"kubernetes.io/projected/5ce8923d-7830-4418-8466-4613a5fc0b7c-kube-api-access-p7zbw\") pod \"5ce8923d-7830-4418-8466-4613a5fc0b7c\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.876847 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-config\") pod \"5ce8923d-7830-4418-8466-4613a5fc0b7c\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.876890 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-proxy-ca-bundles\") pod \"5ce8923d-7830-4418-8466-4613a5fc0b7c\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.876923 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ce8923d-7830-4418-8466-4613a5fc0b7c-serving-cert\") pod \"5ce8923d-7830-4418-8466-4613a5fc0b7c\" (UID: \"5ce8923d-7830-4418-8466-4613a5fc0b7c\") " Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.878656 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "5ce8923d-7830-4418-8466-4613a5fc0b7c" (UID: "5ce8923d-7830-4418-8466-4613a5fc0b7c"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.878666 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-client-ca" (OuterVolumeSpecName: "client-ca") pod "5ce8923d-7830-4418-8466-4613a5fc0b7c" (UID: "5ce8923d-7830-4418-8466-4613a5fc0b7c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.878801 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-config" (OuterVolumeSpecName: "config") pod "5ce8923d-7830-4418-8466-4613a5fc0b7c" (UID: "5ce8923d-7830-4418-8466-4613a5fc0b7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.881134 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ce8923d-7830-4418-8466-4613a5fc0b7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5ce8923d-7830-4418-8466-4613a5fc0b7c" (UID: "5ce8923d-7830-4418-8466-4613a5fc0b7c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.881401 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ce8923d-7830-4418-8466-4613a5fc0b7c-kube-api-access-p7zbw" (OuterVolumeSpecName: "kube-api-access-p7zbw") pod "5ce8923d-7830-4418-8466-4613a5fc0b7c" (UID: "5ce8923d-7830-4418-8466-4613a5fc0b7c"). InnerVolumeSpecName "kube-api-access-p7zbw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.956723 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vn96n"] Feb 01 07:29:29 crc kubenswrapper[4650]: E0201 07:29:29.956909 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ce8923d-7830-4418-8466-4613a5fc0b7c" containerName="controller-manager" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.956921 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ce8923d-7830-4418-8466-4613a5fc0b7c" containerName="controller-manager" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.957016 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ce8923d-7830-4418-8466-4613a5fc0b7c" containerName="controller-manager" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.957895 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.960854 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.977967 4650 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-client-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.977992 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p7zbw\" (UniqueName: \"kubernetes.io/projected/5ce8923d-7830-4418-8466-4613a5fc0b7c-kube-api-access-p7zbw\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.978004 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.978013 4650 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5ce8923d-7830-4418-8466-4613a5fc0b7c-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.978042 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ce8923d-7830-4418-8466-4613a5fc0b7c-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:29 crc kubenswrapper[4650]: I0201 07:29:29.987561 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vn96n"] Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.012423 4650 generic.go:334] "Generic (PLEG): container finished" podID="5ce8923d-7830-4418-8466-4613a5fc0b7c" containerID="a30d406a5a8257cf06413353dc28ef044e68cab0fb7c58470447fe748edcc576" exitCode=0 Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.012461 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.012490 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" event={"ID":"5ce8923d-7830-4418-8466-4613a5fc0b7c","Type":"ContainerDied","Data":"a30d406a5a8257cf06413353dc28ef044e68cab0fb7c58470447fe748edcc576"} Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.012530 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb864b4d-gq96d" event={"ID":"5ce8923d-7830-4418-8466-4613a5fc0b7c","Type":"ContainerDied","Data":"fb94ff1633abb5486bc660f04beb1e839087ed4524c0769708be9eac63f3ac4d"} Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.012553 4650 scope.go:117] "RemoveContainer" containerID="a30d406a5a8257cf06413353dc28ef044e68cab0fb7c58470447fe748edcc576" Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.034236 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-gq96d"] Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.035662 4650 scope.go:117] "RemoveContainer" containerID="a30d406a5a8257cf06413353dc28ef044e68cab0fb7c58470447fe748edcc576" Feb 01 07:29:30 crc kubenswrapper[4650]: E0201 07:29:30.036096 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a30d406a5a8257cf06413353dc28ef044e68cab0fb7c58470447fe748edcc576\": container with ID starting with a30d406a5a8257cf06413353dc28ef044e68cab0fb7c58470447fe748edcc576 not found: ID does not exist" containerID="a30d406a5a8257cf06413353dc28ef044e68cab0fb7c58470447fe748edcc576" Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.036136 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a30d406a5a8257cf06413353dc28ef044e68cab0fb7c58470447fe748edcc576"} err="failed to get container status \"a30d406a5a8257cf06413353dc28ef044e68cab0fb7c58470447fe748edcc576\": rpc error: code = NotFound desc = could not find container \"a30d406a5a8257cf06413353dc28ef044e68cab0fb7c58470447fe748edcc576\": container with ID starting with a30d406a5a8257cf06413353dc28ef044e68cab0fb7c58470447fe748edcc576 not found: ID does not exist" Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.037081 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-gq96d"] Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.078869 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9-catalog-content\") pod \"redhat-operators-vn96n\" (UID: \"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9\") " pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.078947 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nxfd\" (UniqueName: \"kubernetes.io/projected/fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9-kube-api-access-6nxfd\") pod \"redhat-operators-vn96n\" (UID: \"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9\") " pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.079125 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9-utilities\") pod \"redhat-operators-vn96n\" (UID: \"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9\") " pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.180290 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9-catalog-content\") pod \"redhat-operators-vn96n\" (UID: \"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9\") " pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.180338 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nxfd\" (UniqueName: \"kubernetes.io/projected/fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9-kube-api-access-6nxfd\") pod \"redhat-operators-vn96n\" (UID: \"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9\") " pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.180367 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9-utilities\") pod \"redhat-operators-vn96n\" (UID: \"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9\") " pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.180856 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9-utilities\") pod \"redhat-operators-vn96n\" (UID: \"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9\") " pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.181173 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9-catalog-content\") pod \"redhat-operators-vn96n\" (UID: \"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9\") " pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:30 crc kubenswrapper[4650]: W0201 07:29:30.190240 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podebbbb2f8_c26c_40e3_a357_0d43dee59901.slice/crio-58ab9ba0580e82d92eeb82b1efa011cc424c576f32450fa6ae2eb00fe78460a3 WatchSource:0}: Error finding container 58ab9ba0580e82d92eeb82b1efa011cc424c576f32450fa6ae2eb00fe78460a3: Status 404 returned error can't find the container with id 58ab9ba0580e82d92eeb82b1efa011cc424c576f32450fa6ae2eb00fe78460a3 Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.191549 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wcj2n"] Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.199840 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nxfd\" (UniqueName: \"kubernetes.io/projected/fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9-kube-api-access-6nxfd\") pod \"redhat-operators-vn96n\" (UID: \"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9\") " pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.288309 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:30 crc kubenswrapper[4650]: I0201 07:29:30.477357 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vn96n"] Feb 01 07:29:30 crc kubenswrapper[4650]: W0201 07:29:30.478988 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd8d740d_ffe6_47ef_8c1e_8dfdafc2c9b9.slice/crio-73f71c07d605b5af119440423ce0fe766ac095e34d4f538c29e57b9b9bc5ab2b WatchSource:0}: Error finding container 73f71c07d605b5af119440423ce0fe766ac095e34d4f538c29e57b9b9bc5ab2b: Status 404 returned error can't find the container with id 73f71c07d605b5af119440423ce0fe766ac095e34d4f538c29e57b9b9bc5ab2b Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.019970 4650 generic.go:334] "Generic (PLEG): container finished" podID="fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9" containerID="2d59aee15f25715ca0d7c6751e95dac98af2ba02e080dee0084eec697d77aebd" exitCode=0 Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.020064 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vn96n" event={"ID":"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9","Type":"ContainerDied","Data":"2d59aee15f25715ca0d7c6751e95dac98af2ba02e080dee0084eec697d77aebd"} Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.020563 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vn96n" event={"ID":"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9","Type":"ContainerStarted","Data":"73f71c07d605b5af119440423ce0fe766ac095e34d4f538c29e57b9b9bc5ab2b"} Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.023569 4650 generic.go:334] "Generic (PLEG): container finished" podID="ebbbb2f8-c26c-40e3-a357-0d43dee59901" containerID="c16da805f002747df5ff2084852bdf5d40f67508d0eda88f59ebd6655ce39c1e" exitCode=0 Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.023657 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wcj2n" event={"ID":"ebbbb2f8-c26c-40e3-a357-0d43dee59901","Type":"ContainerDied","Data":"c16da805f002747df5ff2084852bdf5d40f67508d0eda88f59ebd6655ce39c1e"} Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.023690 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wcj2n" event={"ID":"ebbbb2f8-c26c-40e3-a357-0d43dee59901","Type":"ContainerStarted","Data":"58ab9ba0580e82d92eeb82b1efa011cc424c576f32450fa6ae2eb00fe78460a3"} Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.125626 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf"] Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.126856 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.129871 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.130730 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.130773 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.130730 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.132779 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.133486 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.136388 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf"] Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.142252 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.295290 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a020a31-7ddb-444d-853e-042bca59b0b0-config\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.295365 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2a020a31-7ddb-444d-853e-042bca59b0b0-proxy-ca-bundles\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.295401 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gh2l\" (UniqueName: \"kubernetes.io/projected/2a020a31-7ddb-444d-853e-042bca59b0b0-kube-api-access-4gh2l\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.295437 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2a020a31-7ddb-444d-853e-042bca59b0b0-client-ca\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.295567 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/2a020a31-7ddb-444d-853e-042bca59b0b0-serving-cert\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.397287 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2a020a31-7ddb-444d-853e-042bca59b0b0-serving-cert\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.397399 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a020a31-7ddb-444d-853e-042bca59b0b0-config\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.397466 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2a020a31-7ddb-444d-853e-042bca59b0b0-proxy-ca-bundles\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.397529 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gh2l\" (UniqueName: \"kubernetes.io/projected/2a020a31-7ddb-444d-853e-042bca59b0b0-kube-api-access-4gh2l\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.397565 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2a020a31-7ddb-444d-853e-042bca59b0b0-client-ca\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.398992 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2a020a31-7ddb-444d-853e-042bca59b0b0-client-ca\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.399004 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a020a31-7ddb-444d-853e-042bca59b0b0-config\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.399164 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2a020a31-7ddb-444d-853e-042bca59b0b0-proxy-ca-bundles\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " 
pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.412210 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2a020a31-7ddb-444d-853e-042bca59b0b0-serving-cert\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.417069 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gh2l\" (UniqueName: \"kubernetes.io/projected/2a020a31-7ddb-444d-853e-042bca59b0b0-kube-api-access-4gh2l\") pod \"controller-manager-6c8b55bd9c-rgsgf\" (UID: \"2a020a31-7ddb-444d-853e-042bca59b0b0\") " pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.450765 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.708438 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf"] Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.752613 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bmfh5"] Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.753813 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.769162 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bmfh5"] Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.771085 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.907345 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da8c05dd-e911-487b-ac7c-025796f9d671-utilities\") pod \"certified-operators-bmfh5\" (UID: \"da8c05dd-e911-487b-ac7c-025796f9d671\") " pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.907828 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da8c05dd-e911-487b-ac7c-025796f9d671-catalog-content\") pod \"certified-operators-bmfh5\" (UID: \"da8c05dd-e911-487b-ac7c-025796f9d671\") " pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.908019 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4vkx\" (UniqueName: \"kubernetes.io/projected/da8c05dd-e911-487b-ac7c-025796f9d671-kube-api-access-q4vkx\") pod \"certified-operators-bmfh5\" (UID: \"da8c05dd-e911-487b-ac7c-025796f9d671\") " pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:31 crc kubenswrapper[4650]: I0201 07:29:31.972866 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ce8923d-7830-4418-8466-4613a5fc0b7c" path="/var/lib/kubelet/pods/5ce8923d-7830-4418-8466-4613a5fc0b7c/volumes" Feb 01 07:29:32 crc 
kubenswrapper[4650]: I0201 07:29:32.009798 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da8c05dd-e911-487b-ac7c-025796f9d671-catalog-content\") pod \"certified-operators-bmfh5\" (UID: \"da8c05dd-e911-487b-ac7c-025796f9d671\") " pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.010489 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4vkx\" (UniqueName: \"kubernetes.io/projected/da8c05dd-e911-487b-ac7c-025796f9d671-kube-api-access-q4vkx\") pod \"certified-operators-bmfh5\" (UID: \"da8c05dd-e911-487b-ac7c-025796f9d671\") " pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.010401 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da8c05dd-e911-487b-ac7c-025796f9d671-catalog-content\") pod \"certified-operators-bmfh5\" (UID: \"da8c05dd-e911-487b-ac7c-025796f9d671\") " pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.010526 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da8c05dd-e911-487b-ac7c-025796f9d671-utilities\") pod \"certified-operators-bmfh5\" (UID: \"da8c05dd-e911-487b-ac7c-025796f9d671\") " pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.010867 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da8c05dd-e911-487b-ac7c-025796f9d671-utilities\") pod \"certified-operators-bmfh5\" (UID: \"da8c05dd-e911-487b-ac7c-025796f9d671\") " pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.028527 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4vkx\" (UniqueName: \"kubernetes.io/projected/da8c05dd-e911-487b-ac7c-025796f9d671-kube-api-access-q4vkx\") pod \"certified-operators-bmfh5\" (UID: \"da8c05dd-e911-487b-ac7c-025796f9d671\") " pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.033155 4650 generic.go:334] "Generic (PLEG): container finished" podID="ebbbb2f8-c26c-40e3-a357-0d43dee59901" containerID="cd9b4f4c3949d00dba101c55b4d103fc27ff634b0b2017f1163a001a4102c886" exitCode=0 Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.033228 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wcj2n" event={"ID":"ebbbb2f8-c26c-40e3-a357-0d43dee59901","Type":"ContainerDied","Data":"cd9b4f4c3949d00dba101c55b4d103fc27ff634b0b2017f1163a001a4102c886"} Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.034976 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vn96n" event={"ID":"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9","Type":"ContainerStarted","Data":"99a3506b6032fb8f9f70c71b24859424483201038dd76a58a136b658478d9fc5"} Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.037868 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" 
event={"ID":"2a020a31-7ddb-444d-853e-042bca59b0b0","Type":"ContainerStarted","Data":"7be216a9b3d49036c2eb8cf0eddf61e867e95fa235c8c1cc3a7f5b606aed97be"} Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.037916 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" event={"ID":"2a020a31-7ddb-444d-853e-042bca59b0b0","Type":"ContainerStarted","Data":"c32dd1665e295c20d059fdd46b667b345bfa84c3ac5dac0f2b22e182468dd67b"} Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.038131 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.043134 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.093730 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6c8b55bd9c-rgsgf" podStartSLOduration=3.0937142 podStartE2EDuration="3.0937142s" podCreationTimestamp="2026-02-01 07:29:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:29:32.093496355 +0000 UTC m=+370.816594610" watchObservedRunningTime="2026-02-01 07:29:32.0937142 +0000 UTC m=+370.816812445" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.105858 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.364340 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-llp7t"] Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.377792 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.385465 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.400416 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-llp7t"] Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.456957 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bmfh5"] Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.524260 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e223410d-6b2d-464a-8e86-4355dbf698b2-catalog-content\") pod \"community-operators-llp7t\" (UID: \"e223410d-6b2d-464a-8e86-4355dbf698b2\") " pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.524354 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e223410d-6b2d-464a-8e86-4355dbf698b2-utilities\") pod \"community-operators-llp7t\" (UID: \"e223410d-6b2d-464a-8e86-4355dbf698b2\") " pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.524388 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlkbr\" (UniqueName: \"kubernetes.io/projected/e223410d-6b2d-464a-8e86-4355dbf698b2-kube-api-access-rlkbr\") pod \"community-operators-llp7t\" (UID: \"e223410d-6b2d-464a-8e86-4355dbf698b2\") " pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.625924 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e223410d-6b2d-464a-8e86-4355dbf698b2-catalog-content\") pod \"community-operators-llp7t\" (UID: \"e223410d-6b2d-464a-8e86-4355dbf698b2\") " pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.625980 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e223410d-6b2d-464a-8e86-4355dbf698b2-utilities\") pod \"community-operators-llp7t\" (UID: \"e223410d-6b2d-464a-8e86-4355dbf698b2\") " pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.626038 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlkbr\" (UniqueName: \"kubernetes.io/projected/e223410d-6b2d-464a-8e86-4355dbf698b2-kube-api-access-rlkbr\") pod \"community-operators-llp7t\" (UID: \"e223410d-6b2d-464a-8e86-4355dbf698b2\") " pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.627656 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e223410d-6b2d-464a-8e86-4355dbf698b2-catalog-content\") pod \"community-operators-llp7t\" (UID: \"e223410d-6b2d-464a-8e86-4355dbf698b2\") " pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.628186 4650 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e223410d-6b2d-464a-8e86-4355dbf698b2-utilities\") pod \"community-operators-llp7t\" (UID: \"e223410d-6b2d-464a-8e86-4355dbf698b2\") " pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.654895 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlkbr\" (UniqueName: \"kubernetes.io/projected/e223410d-6b2d-464a-8e86-4355dbf698b2-kube-api-access-rlkbr\") pod \"community-operators-llp7t\" (UID: \"e223410d-6b2d-464a-8e86-4355dbf698b2\") " pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:32 crc kubenswrapper[4650]: I0201 07:29:32.717368 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:33 crc kubenswrapper[4650]: I0201 07:29:33.044598 4650 generic.go:334] "Generic (PLEG): container finished" podID="da8c05dd-e911-487b-ac7c-025796f9d671" containerID="28eaa96e8b8a20a1200d800c30d8c8f2b09ab5b2a34cf514ee6eab2b44f59f6f" exitCode=0 Feb 01 07:29:33 crc kubenswrapper[4650]: I0201 07:29:33.045038 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bmfh5" event={"ID":"da8c05dd-e911-487b-ac7c-025796f9d671","Type":"ContainerDied","Data":"28eaa96e8b8a20a1200d800c30d8c8f2b09ab5b2a34cf514ee6eab2b44f59f6f"} Feb 01 07:29:33 crc kubenswrapper[4650]: I0201 07:29:33.045094 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bmfh5" event={"ID":"da8c05dd-e911-487b-ac7c-025796f9d671","Type":"ContainerStarted","Data":"ebd91fd710f0fc8baf882611f84b277c3386a203443a7798688e143a32621f11"} Feb 01 07:29:33 crc kubenswrapper[4650]: I0201 07:29:33.052437 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wcj2n" event={"ID":"ebbbb2f8-c26c-40e3-a357-0d43dee59901","Type":"ContainerStarted","Data":"fafb159044e3b2aaf5aa3ae04b46ae04b00a232667b90cfdf6f7ad0c649f4ea3"} Feb 01 07:29:33 crc kubenswrapper[4650]: I0201 07:29:33.054884 4650 generic.go:334] "Generic (PLEG): container finished" podID="fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9" containerID="99a3506b6032fb8f9f70c71b24859424483201038dd76a58a136b658478d9fc5" exitCode=0 Feb 01 07:29:33 crc kubenswrapper[4650]: I0201 07:29:33.054998 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vn96n" event={"ID":"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9","Type":"ContainerDied","Data":"99a3506b6032fb8f9f70c71b24859424483201038dd76a58a136b658478d9fc5"} Feb 01 07:29:33 crc kubenswrapper[4650]: I0201 07:29:33.103707 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wcj2n" podStartSLOduration=2.673658765 podStartE2EDuration="4.103690791s" podCreationTimestamp="2026-02-01 07:29:29 +0000 UTC" firstStartedPulling="2026-02-01 07:29:31.025157256 +0000 UTC m=+369.748255501" lastFinishedPulling="2026-02-01 07:29:32.455189282 +0000 UTC m=+371.178287527" observedRunningTime="2026-02-01 07:29:33.099114986 +0000 UTC m=+371.822213241" watchObservedRunningTime="2026-02-01 07:29:33.103690791 +0000 UTC m=+371.826789036" Feb 01 07:29:33 crc kubenswrapper[4650]: I0201 07:29:33.218058 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-llp7t"] Feb 01 07:29:33 crc kubenswrapper[4650]: W0201 07:29:33.219489 4650 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode223410d_6b2d_464a_8e86_4355dbf698b2.slice/crio-1f142c3619a581b2911ba26eba4650027b87b48c263daf50e2d0c4e6eaa19195 WatchSource:0}: Error finding container 1f142c3619a581b2911ba26eba4650027b87b48c263daf50e2d0c4e6eaa19195: Status 404 returned error can't find the container with id 1f142c3619a581b2911ba26eba4650027b87b48c263daf50e2d0c4e6eaa19195 Feb 01 07:29:34 crc kubenswrapper[4650]: I0201 07:29:34.062043 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bmfh5" event={"ID":"da8c05dd-e911-487b-ac7c-025796f9d671","Type":"ContainerStarted","Data":"8fe6b9cca602f659898133a09e20454088b9322991d682cb6e5647646a233642"} Feb 01 07:29:34 crc kubenswrapper[4650]: I0201 07:29:34.065572 4650 generic.go:334] "Generic (PLEG): container finished" podID="e223410d-6b2d-464a-8e86-4355dbf698b2" containerID="a5d911ab05f50f3861d02e496c5c922a3b14926fd707450d668eaefe4635c1df" exitCode=0 Feb 01 07:29:34 crc kubenswrapper[4650]: I0201 07:29:34.065615 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llp7t" event={"ID":"e223410d-6b2d-464a-8e86-4355dbf698b2","Type":"ContainerDied","Data":"a5d911ab05f50f3861d02e496c5c922a3b14926fd707450d668eaefe4635c1df"} Feb 01 07:29:34 crc kubenswrapper[4650]: I0201 07:29:34.065630 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llp7t" event={"ID":"e223410d-6b2d-464a-8e86-4355dbf698b2","Type":"ContainerStarted","Data":"1f142c3619a581b2911ba26eba4650027b87b48c263daf50e2d0c4e6eaa19195"} Feb 01 07:29:34 crc kubenswrapper[4650]: I0201 07:29:34.075559 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vn96n" event={"ID":"fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9","Type":"ContainerStarted","Data":"147514c3128e19a4e74c2e51303109616371149dc4444283dfecb624203b0aa7"} Feb 01 07:29:34 crc kubenswrapper[4650]: I0201 07:29:34.140252 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vn96n" podStartSLOduration=2.718294919 podStartE2EDuration="5.140234062s" podCreationTimestamp="2026-02-01 07:29:29 +0000 UTC" firstStartedPulling="2026-02-01 07:29:31.021666706 +0000 UTC m=+369.744764951" lastFinishedPulling="2026-02-01 07:29:33.443605849 +0000 UTC m=+372.166704094" observedRunningTime="2026-02-01 07:29:34.136477296 +0000 UTC m=+372.859575551" watchObservedRunningTime="2026-02-01 07:29:34.140234062 +0000 UTC m=+372.863332307" Feb 01 07:29:35 crc kubenswrapper[4650]: I0201 07:29:35.085152 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llp7t" event={"ID":"e223410d-6b2d-464a-8e86-4355dbf698b2","Type":"ContainerStarted","Data":"cc02dbec7219a84173eca6702ab1ee07d02e86b0b429ba3ff0f0e99e6f76caaf"} Feb 01 07:29:35 crc kubenswrapper[4650]: I0201 07:29:35.091946 4650 generic.go:334] "Generic (PLEG): container finished" podID="da8c05dd-e911-487b-ac7c-025796f9d671" containerID="8fe6b9cca602f659898133a09e20454088b9322991d682cb6e5647646a233642" exitCode=0 Feb 01 07:29:35 crc kubenswrapper[4650]: I0201 07:29:35.092061 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bmfh5" event={"ID":"da8c05dd-e911-487b-ac7c-025796f9d671","Type":"ContainerDied","Data":"8fe6b9cca602f659898133a09e20454088b9322991d682cb6e5647646a233642"} Feb 01 
07:29:36 crc kubenswrapper[4650]: I0201 07:29:36.099330 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bmfh5" event={"ID":"da8c05dd-e911-487b-ac7c-025796f9d671","Type":"ContainerStarted","Data":"13c014d655fe7d95a7c859f900c99778af1acd8003768d38803cb372842273d2"} Feb 01 07:29:36 crc kubenswrapper[4650]: I0201 07:29:36.102001 4650 generic.go:334] "Generic (PLEG): container finished" podID="e223410d-6b2d-464a-8e86-4355dbf698b2" containerID="cc02dbec7219a84173eca6702ab1ee07d02e86b0b429ba3ff0f0e99e6f76caaf" exitCode=0 Feb 01 07:29:36 crc kubenswrapper[4650]: I0201 07:29:36.102105 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llp7t" event={"ID":"e223410d-6b2d-464a-8e86-4355dbf698b2","Type":"ContainerDied","Data":"cc02dbec7219a84173eca6702ab1ee07d02e86b0b429ba3ff0f0e99e6f76caaf"} Feb 01 07:29:36 crc kubenswrapper[4650]: I0201 07:29:36.119754 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bmfh5" podStartSLOduration=2.6746323629999997 podStartE2EDuration="5.119741747s" podCreationTimestamp="2026-02-01 07:29:31 +0000 UTC" firstStartedPulling="2026-02-01 07:29:33.0469116 +0000 UTC m=+371.770009835" lastFinishedPulling="2026-02-01 07:29:35.492020974 +0000 UTC m=+374.215119219" observedRunningTime="2026-02-01 07:29:36.1181486 +0000 UTC m=+374.841246855" watchObservedRunningTime="2026-02-01 07:29:36.119741747 +0000 UTC m=+374.842839982" Feb 01 07:29:37 crc kubenswrapper[4650]: I0201 07:29:37.161173 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:29:37 crc kubenswrapper[4650]: I0201 07:29:37.161608 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:29:38 crc kubenswrapper[4650]: I0201 07:29:38.114895 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llp7t" event={"ID":"e223410d-6b2d-464a-8e86-4355dbf698b2","Type":"ContainerStarted","Data":"35b4b9a383e2198158a1b51014f313d213a35cb1ab6d89b9c3c4876fd49e3c50"} Feb 01 07:29:38 crc kubenswrapper[4650]: I0201 07:29:38.138872 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-llp7t" podStartSLOduration=3.704402239 podStartE2EDuration="6.138852708s" podCreationTimestamp="2026-02-01 07:29:32 +0000 UTC" firstStartedPulling="2026-02-01 07:29:34.06771475 +0000 UTC m=+372.790812995" lastFinishedPulling="2026-02-01 07:29:36.502165209 +0000 UTC m=+375.225263464" observedRunningTime="2026-02-01 07:29:38.13629762 +0000 UTC m=+376.859395865" watchObservedRunningTime="2026-02-01 07:29:38.138852708 +0000 UTC m=+376.861950953" Feb 01 07:29:39 crc kubenswrapper[4650]: I0201 07:29:39.783941 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:39 crc kubenswrapper[4650]: I0201 07:29:39.784265 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:39 crc kubenswrapper[4650]: I0201 07:29:39.831493 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:40 crc kubenswrapper[4650]: I0201 07:29:40.162593 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wcj2n" Feb 01 07:29:40 crc kubenswrapper[4650]: I0201 07:29:40.288843 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:40 crc kubenswrapper[4650]: I0201 07:29:40.288901 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:40 crc kubenswrapper[4650]: I0201 07:29:40.344612 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:41 crc kubenswrapper[4650]: I0201 07:29:41.177217 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vn96n" Feb 01 07:29:42 crc kubenswrapper[4650]: I0201 07:29:42.106535 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:42 crc kubenswrapper[4650]: I0201 07:29:42.106594 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:42 crc kubenswrapper[4650]: I0201 07:29:42.166270 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:42 crc kubenswrapper[4650]: I0201 07:29:42.221311 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bmfh5" Feb 01 07:29:42 crc kubenswrapper[4650]: I0201 07:29:42.718364 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:42 crc kubenswrapper[4650]: I0201 07:29:42.718448 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:42 crc kubenswrapper[4650]: I0201 07:29:42.779776 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:43 crc kubenswrapper[4650]: I0201 07:29:43.195123 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:29:44 crc kubenswrapper[4650]: I0201 07:29:44.420952 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-g62p7" Feb 01 07:29:44 crc kubenswrapper[4650]: I0201 07:29:44.480134 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-crkwn"] Feb 01 07:29:49 crc kubenswrapper[4650]: I0201 07:29:49.357107 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw"] Feb 01 07:29:49 crc kubenswrapper[4650]: I0201 07:29:49.357841 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" podUID="97ee854f-2b6d-4ce6-91f0-297efca1fc4b" 
containerName="route-controller-manager" containerID="cri-o://18eacfac8f7e9ece2855a2de0dc7f813fd68f8d321db73038f2873cbc63c5370" gracePeriod=30 Feb 01 07:29:49 crc kubenswrapper[4650]: I0201 07:29:49.934264 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.109827 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-client-ca\") pod \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.109917 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-serving-cert\") pod \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.110007 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-config\") pod \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.110069 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twch8\" (UniqueName: \"kubernetes.io/projected/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-kube-api-access-twch8\") pod \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\" (UID: \"97ee854f-2b6d-4ce6-91f0-297efca1fc4b\") " Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.112540 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-client-ca" (OuterVolumeSpecName: "client-ca") pod "97ee854f-2b6d-4ce6-91f0-297efca1fc4b" (UID: "97ee854f-2b6d-4ce6-91f0-297efca1fc4b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.112639 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-config" (OuterVolumeSpecName: "config") pod "97ee854f-2b6d-4ce6-91f0-297efca1fc4b" (UID: "97ee854f-2b6d-4ce6-91f0-297efca1fc4b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.116988 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "97ee854f-2b6d-4ce6-91f0-297efca1fc4b" (UID: "97ee854f-2b6d-4ce6-91f0-297efca1fc4b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.121332 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-kube-api-access-twch8" (OuterVolumeSpecName: "kube-api-access-twch8") pod "97ee854f-2b6d-4ce6-91f0-297efca1fc4b" (UID: "97ee854f-2b6d-4ce6-91f0-297efca1fc4b"). InnerVolumeSpecName "kube-api-access-twch8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.201461 4650 generic.go:334] "Generic (PLEG): container finished" podID="97ee854f-2b6d-4ce6-91f0-297efca1fc4b" containerID="18eacfac8f7e9ece2855a2de0dc7f813fd68f8d321db73038f2873cbc63c5370" exitCode=0 Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.201535 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" event={"ID":"97ee854f-2b6d-4ce6-91f0-297efca1fc4b","Type":"ContainerDied","Data":"18eacfac8f7e9ece2855a2de0dc7f813fd68f8d321db73038f2873cbc63c5370"} Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.201577 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" event={"ID":"97ee854f-2b6d-4ce6-91f0-297efca1fc4b","Type":"ContainerDied","Data":"a8be2b47513eea6bb5573260604448bc130611d456ec98a3ecda9a6615500a03"} Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.201604 4650 scope.go:117] "RemoveContainer" containerID="18eacfac8f7e9ece2855a2de0dc7f813fd68f8d321db73038f2873cbc63c5370" Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.201770 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw" Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.211895 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.211935 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twch8\" (UniqueName: \"kubernetes.io/projected/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-kube-api-access-twch8\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.211951 4650 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-client-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.211967 4650 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97ee854f-2b6d-4ce6-91f0-297efca1fc4b-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.219983 4650 scope.go:117] "RemoveContainer" containerID="18eacfac8f7e9ece2855a2de0dc7f813fd68f8d321db73038f2873cbc63c5370" Feb 01 07:29:50 crc kubenswrapper[4650]: E0201 07:29:50.220820 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18eacfac8f7e9ece2855a2de0dc7f813fd68f8d321db73038f2873cbc63c5370\": container with ID starting with 18eacfac8f7e9ece2855a2de0dc7f813fd68f8d321db73038f2873cbc63c5370 not found: ID does not exist" containerID="18eacfac8f7e9ece2855a2de0dc7f813fd68f8d321db73038f2873cbc63c5370" Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.220888 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18eacfac8f7e9ece2855a2de0dc7f813fd68f8d321db73038f2873cbc63c5370"} err="failed to get container status \"18eacfac8f7e9ece2855a2de0dc7f813fd68f8d321db73038f2873cbc63c5370\": rpc error: code = NotFound desc = could not find container 
\"18eacfac8f7e9ece2855a2de0dc7f813fd68f8d321db73038f2873cbc63c5370\": container with ID starting with 18eacfac8f7e9ece2855a2de0dc7f813fd68f8d321db73038f2873cbc63c5370 not found: ID does not exist" Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.240827 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw"] Feb 01 07:29:50 crc kubenswrapper[4650]: I0201 07:29:50.262411 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5dcdbd9666-5njxw"] Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.137180 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml"] Feb 01 07:29:51 crc kubenswrapper[4650]: E0201 07:29:51.137404 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97ee854f-2b6d-4ce6-91f0-297efca1fc4b" containerName="route-controller-manager" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.137416 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="97ee854f-2b6d-4ce6-91f0-297efca1fc4b" containerName="route-controller-manager" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.137503 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="97ee854f-2b6d-4ce6-91f0-297efca1fc4b" containerName="route-controller-manager" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.137863 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.140942 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.141226 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.142442 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.142609 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.146156 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.146348 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.153984 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml"] Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.226751 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c29f4541-651e-48cd-a3a7-f91366402059-serving-cert\") pod \"route-controller-manager-6fd9c7dcbc-mqgml\" (UID: \"c29f4541-651e-48cd-a3a7-f91366402059\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.226866 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c667k\" (UniqueName: \"kubernetes.io/projected/c29f4541-651e-48cd-a3a7-f91366402059-kube-api-access-c667k\") pod \"route-controller-manager-6fd9c7dcbc-mqgml\" (UID: \"c29f4541-651e-48cd-a3a7-f91366402059\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.226899 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c29f4541-651e-48cd-a3a7-f91366402059-config\") pod \"route-controller-manager-6fd9c7dcbc-mqgml\" (UID: \"c29f4541-651e-48cd-a3a7-f91366402059\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.226917 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c29f4541-651e-48cd-a3a7-f91366402059-client-ca\") pod \"route-controller-manager-6fd9c7dcbc-mqgml\" (UID: \"c29f4541-651e-48cd-a3a7-f91366402059\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.328149 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c29f4541-651e-48cd-a3a7-f91366402059-serving-cert\") pod \"route-controller-manager-6fd9c7dcbc-mqgml\" (UID: \"c29f4541-651e-48cd-a3a7-f91366402059\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.328231 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c667k\" (UniqueName: \"kubernetes.io/projected/c29f4541-651e-48cd-a3a7-f91366402059-kube-api-access-c667k\") pod \"route-controller-manager-6fd9c7dcbc-mqgml\" (UID: \"c29f4541-651e-48cd-a3a7-f91366402059\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.328260 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c29f4541-651e-48cd-a3a7-f91366402059-config\") pod \"route-controller-manager-6fd9c7dcbc-mqgml\" (UID: \"c29f4541-651e-48cd-a3a7-f91366402059\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.328276 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c29f4541-651e-48cd-a3a7-f91366402059-client-ca\") pod \"route-controller-manager-6fd9c7dcbc-mqgml\" (UID: \"c29f4541-651e-48cd-a3a7-f91366402059\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.329211 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c29f4541-651e-48cd-a3a7-f91366402059-client-ca\") pod \"route-controller-manager-6fd9c7dcbc-mqgml\" (UID: \"c29f4541-651e-48cd-a3a7-f91366402059\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.329710 4650 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c29f4541-651e-48cd-a3a7-f91366402059-config\") pod \"route-controller-manager-6fd9c7dcbc-mqgml\" (UID: \"c29f4541-651e-48cd-a3a7-f91366402059\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.332800 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c29f4541-651e-48cd-a3a7-f91366402059-serving-cert\") pod \"route-controller-manager-6fd9c7dcbc-mqgml\" (UID: \"c29f4541-651e-48cd-a3a7-f91366402059\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.346536 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c667k\" (UniqueName: \"kubernetes.io/projected/c29f4541-651e-48cd-a3a7-f91366402059-kube-api-access-c667k\") pod \"route-controller-manager-6fd9c7dcbc-mqgml\" (UID: \"c29f4541-651e-48cd-a3a7-f91366402059\") " pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.456284 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.972744 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97ee854f-2b6d-4ce6-91f0-297efca1fc4b" path="/var/lib/kubelet/pods/97ee854f-2b6d-4ce6-91f0-297efca1fc4b/volumes" Feb 01 07:29:51 crc kubenswrapper[4650]: I0201 07:29:51.986873 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml"] Feb 01 07:29:52 crc kubenswrapper[4650]: W0201 07:29:52.008252 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc29f4541_651e_48cd_a3a7_f91366402059.slice/crio-4d9b87834e5a15c8a0969a1b4ab15eb924c0ca3614bc8b4b224885256ea3b16f WatchSource:0}: Error finding container 4d9b87834e5a15c8a0969a1b4ab15eb924c0ca3614bc8b4b224885256ea3b16f: Status 404 returned error can't find the container with id 4d9b87834e5a15c8a0969a1b4ab15eb924c0ca3614bc8b4b224885256ea3b16f Feb 01 07:29:52 crc kubenswrapper[4650]: I0201 07:29:52.214599 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" event={"ID":"c29f4541-651e-48cd-a3a7-f91366402059","Type":"ContainerStarted","Data":"7201fe718ae5205d2c0842b86be533632c0c8ebe90a0b9131fd7ba1ed64c7d8a"} Feb 01 07:29:52 crc kubenswrapper[4650]: I0201 07:29:52.214967 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:29:52 crc kubenswrapper[4650]: I0201 07:29:52.214992 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" event={"ID":"c29f4541-651e-48cd-a3a7-f91366402059","Type":"ContainerStarted","Data":"4d9b87834e5a15c8a0969a1b4ab15eb924c0ca3614bc8b4b224885256ea3b16f"} Feb 01 07:29:52 crc kubenswrapper[4650]: I0201 07:29:52.216233 4650 patch_prober.go:28] interesting pod/route-controller-manager-6fd9c7dcbc-mqgml container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe 
status=failure output="Get \"https://10.217.0.74:8443/healthz\": dial tcp 10.217.0.74:8443: connect: connection refused" start-of-body= Feb 01 07:29:52 crc kubenswrapper[4650]: I0201 07:29:52.216274 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" podUID="c29f4541-651e-48cd-a3a7-f91366402059" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.74:8443/healthz\": dial tcp 10.217.0.74:8443: connect: connection refused" Feb 01 07:29:52 crc kubenswrapper[4650]: I0201 07:29:52.234132 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" podStartSLOduration=3.234114522 podStartE2EDuration="3.234114522s" podCreationTimestamp="2026-02-01 07:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:29:52.231625395 +0000 UTC m=+390.954723640" watchObservedRunningTime="2026-02-01 07:29:52.234114522 +0000 UTC m=+390.957212767" Feb 01 07:29:53 crc kubenswrapper[4650]: I0201 07:29:53.227248 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6fd9c7dcbc-mqgml" Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.206423 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz"] Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.208010 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.214000 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.214216 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.220509 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz"] Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.363460 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7091e599-b67c-44d8-945c-329bac31dd6d-secret-volume\") pod \"collect-profiles-29498850-6hlsz\" (UID: \"7091e599-b67c-44d8-945c-329bac31dd6d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.363650 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7091e599-b67c-44d8-945c-329bac31dd6d-config-volume\") pod \"collect-profiles-29498850-6hlsz\" (UID: \"7091e599-b67c-44d8-945c-329bac31dd6d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.363710 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlldn\" (UniqueName: \"kubernetes.io/projected/7091e599-b67c-44d8-945c-329bac31dd6d-kube-api-access-dlldn\") pod 
\"collect-profiles-29498850-6hlsz\" (UID: \"7091e599-b67c-44d8-945c-329bac31dd6d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.464903 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7091e599-b67c-44d8-945c-329bac31dd6d-secret-volume\") pod \"collect-profiles-29498850-6hlsz\" (UID: \"7091e599-b67c-44d8-945c-329bac31dd6d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.465122 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7091e599-b67c-44d8-945c-329bac31dd6d-config-volume\") pod \"collect-profiles-29498850-6hlsz\" (UID: \"7091e599-b67c-44d8-945c-329bac31dd6d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.465188 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlldn\" (UniqueName: \"kubernetes.io/projected/7091e599-b67c-44d8-945c-329bac31dd6d-kube-api-access-dlldn\") pod \"collect-profiles-29498850-6hlsz\" (UID: \"7091e599-b67c-44d8-945c-329bac31dd6d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.466821 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7091e599-b67c-44d8-945c-329bac31dd6d-config-volume\") pod \"collect-profiles-29498850-6hlsz\" (UID: \"7091e599-b67c-44d8-945c-329bac31dd6d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.474602 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7091e599-b67c-44d8-945c-329bac31dd6d-secret-volume\") pod \"collect-profiles-29498850-6hlsz\" (UID: \"7091e599-b67c-44d8-945c-329bac31dd6d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.495969 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlldn\" (UniqueName: \"kubernetes.io/projected/7091e599-b67c-44d8-945c-329bac31dd6d-kube-api-access-dlldn\") pod \"collect-profiles-29498850-6hlsz\" (UID: \"7091e599-b67c-44d8-945c-329bac31dd6d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" Feb 01 07:30:00 crc kubenswrapper[4650]: I0201 07:30:00.548551 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" Feb 01 07:30:01 crc kubenswrapper[4650]: I0201 07:30:01.053061 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz"] Feb 01 07:30:01 crc kubenswrapper[4650]: I0201 07:30:01.306914 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" event={"ID":"7091e599-b67c-44d8-945c-329bac31dd6d","Type":"ContainerStarted","Data":"bd95859aed792eacf3a5edd64fccdcd14a6c094b34f7f182e8f0adbbb000c6ac"} Feb 01 07:30:01 crc kubenswrapper[4650]: I0201 07:30:01.307129 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" event={"ID":"7091e599-b67c-44d8-945c-329bac31dd6d","Type":"ContainerStarted","Data":"3f68472b21b2a8e9a2e6c4348dc7769efbf9671008f11407fef4e62746c2d477"} Feb 01 07:30:01 crc kubenswrapper[4650]: I0201 07:30:01.336144 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" podStartSLOduration=1.336113624 podStartE2EDuration="1.336113624s" podCreationTimestamp="2026-02-01 07:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:30:01.332296285 +0000 UTC m=+400.055394540" watchObservedRunningTime="2026-02-01 07:30:01.336113624 +0000 UTC m=+400.059211879" Feb 01 07:30:02 crc kubenswrapper[4650]: I0201 07:30:02.316363 4650 generic.go:334] "Generic (PLEG): container finished" podID="7091e599-b67c-44d8-945c-329bac31dd6d" containerID="bd95859aed792eacf3a5edd64fccdcd14a6c094b34f7f182e8f0adbbb000c6ac" exitCode=0 Feb 01 07:30:02 crc kubenswrapper[4650]: I0201 07:30:02.316436 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" event={"ID":"7091e599-b67c-44d8-945c-329bac31dd6d","Type":"ContainerDied","Data":"bd95859aed792eacf3a5edd64fccdcd14a6c094b34f7f182e8f0adbbb000c6ac"} Feb 01 07:30:03 crc kubenswrapper[4650]: I0201 07:30:03.790344 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" Feb 01 07:30:03 crc kubenswrapper[4650]: I0201 07:30:03.923956 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7091e599-b67c-44d8-945c-329bac31dd6d-config-volume\") pod \"7091e599-b67c-44d8-945c-329bac31dd6d\" (UID: \"7091e599-b67c-44d8-945c-329bac31dd6d\") " Feb 01 07:30:03 crc kubenswrapper[4650]: I0201 07:30:03.924165 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7091e599-b67c-44d8-945c-329bac31dd6d-secret-volume\") pod \"7091e599-b67c-44d8-945c-329bac31dd6d\" (UID: \"7091e599-b67c-44d8-945c-329bac31dd6d\") " Feb 01 07:30:03 crc kubenswrapper[4650]: I0201 07:30:03.924206 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dlldn\" (UniqueName: \"kubernetes.io/projected/7091e599-b67c-44d8-945c-329bac31dd6d-kube-api-access-dlldn\") pod \"7091e599-b67c-44d8-945c-329bac31dd6d\" (UID: \"7091e599-b67c-44d8-945c-329bac31dd6d\") " Feb 01 07:30:03 crc kubenswrapper[4650]: I0201 07:30:03.924930 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7091e599-b67c-44d8-945c-329bac31dd6d-config-volume" (OuterVolumeSpecName: "config-volume") pod "7091e599-b67c-44d8-945c-329bac31dd6d" (UID: "7091e599-b67c-44d8-945c-329bac31dd6d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:30:03 crc kubenswrapper[4650]: I0201 07:30:03.933783 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7091e599-b67c-44d8-945c-329bac31dd6d-kube-api-access-dlldn" (OuterVolumeSpecName: "kube-api-access-dlldn") pod "7091e599-b67c-44d8-945c-329bac31dd6d" (UID: "7091e599-b67c-44d8-945c-329bac31dd6d"). InnerVolumeSpecName "kube-api-access-dlldn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:30:03 crc kubenswrapper[4650]: I0201 07:30:03.934244 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7091e599-b67c-44d8-945c-329bac31dd6d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7091e599-b67c-44d8-945c-329bac31dd6d" (UID: "7091e599-b67c-44d8-945c-329bac31dd6d"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:30:04 crc kubenswrapper[4650]: I0201 07:30:04.026533 4650 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7091e599-b67c-44d8-945c-329bac31dd6d-config-volume\") on node \"crc\" DevicePath \"\"" Feb 01 07:30:04 crc kubenswrapper[4650]: I0201 07:30:04.026610 4650 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7091e599-b67c-44d8-945c-329bac31dd6d-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 01 07:30:04 crc kubenswrapper[4650]: I0201 07:30:04.026631 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dlldn\" (UniqueName: \"kubernetes.io/projected/7091e599-b67c-44d8-945c-329bac31dd6d-kube-api-access-dlldn\") on node \"crc\" DevicePath \"\"" Feb 01 07:30:04 crc kubenswrapper[4650]: I0201 07:30:04.338853 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" event={"ID":"7091e599-b67c-44d8-945c-329bac31dd6d","Type":"ContainerDied","Data":"3f68472b21b2a8e9a2e6c4348dc7769efbf9671008f11407fef4e62746c2d477"} Feb 01 07:30:04 crc kubenswrapper[4650]: I0201 07:30:04.340374 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f68472b21b2a8e9a2e6c4348dc7769efbf9671008f11407fef4e62746c2d477" Feb 01 07:30:04 crc kubenswrapper[4650]: I0201 07:30:04.338941 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz" Feb 01 07:30:07 crc kubenswrapper[4650]: I0201 07:30:07.162176 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:30:07 crc kubenswrapper[4650]: I0201 07:30:07.162829 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:30:07 crc kubenswrapper[4650]: I0201 07:30:07.162937 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:30:07 crc kubenswrapper[4650]: I0201 07:30:07.164153 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"db279aeb24995ec5143fa01137b45dc7b7c1ab6084221190d10c8193ed14bc2e"} pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 01 07:30:07 crc kubenswrapper[4650]: I0201 07:30:07.164290 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" containerID="cri-o://db279aeb24995ec5143fa01137b45dc7b7c1ab6084221190d10c8193ed14bc2e" gracePeriod=600 Feb 01 07:30:07 crc kubenswrapper[4650]: I0201 07:30:07.364226 4650 generic.go:334] "Generic (PLEG): container finished" 
podID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerID="db279aeb24995ec5143fa01137b45dc7b7c1ab6084221190d10c8193ed14bc2e" exitCode=0 Feb 01 07:30:07 crc kubenswrapper[4650]: I0201 07:30:07.364266 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerDied","Data":"db279aeb24995ec5143fa01137b45dc7b7c1ab6084221190d10c8193ed14bc2e"} Feb 01 07:30:07 crc kubenswrapper[4650]: I0201 07:30:07.364296 4650 scope.go:117] "RemoveContainer" containerID="e9d5d2d618ee7bb6d53e7fc147dd772e827691b45eee3045ea38bcbb8c6d4da1" Feb 01 07:30:08 crc kubenswrapper[4650]: I0201 07:30:08.375691 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"e3b794505f123da84dff50f9a8a52e0b394a3a5d8569c0bb7517422e3f2965d9"} Feb 01 07:30:09 crc kubenswrapper[4650]: I0201 07:30:09.523747 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" podUID="a0e132bd-4673-48b5-9362-32781a1f9405" containerName="registry" containerID="cri-o://a996c6df9aa5b49d95289d73a27adf66ec6c55896ff2846651c514af9b21ca0d" gracePeriod=30 Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.063973 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.219295 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-registry-tls\") pod \"a0e132bd-4673-48b5-9362-32781a1f9405\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.219382 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbctd\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-kube-api-access-gbctd\") pod \"a0e132bd-4673-48b5-9362-32781a1f9405\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.219434 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a0e132bd-4673-48b5-9362-32781a1f9405-ca-trust-extracted\") pod \"a0e132bd-4673-48b5-9362-32781a1f9405\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.219469 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a0e132bd-4673-48b5-9362-32781a1f9405-installation-pull-secrets\") pod \"a0e132bd-4673-48b5-9362-32781a1f9405\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.219513 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a0e132bd-4673-48b5-9362-32781a1f9405-trusted-ca\") pod \"a0e132bd-4673-48b5-9362-32781a1f9405\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.219739 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"a0e132bd-4673-48b5-9362-32781a1f9405\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.219820 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-bound-sa-token\") pod \"a0e132bd-4673-48b5-9362-32781a1f9405\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.219870 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a0e132bd-4673-48b5-9362-32781a1f9405-registry-certificates\") pod \"a0e132bd-4673-48b5-9362-32781a1f9405\" (UID: \"a0e132bd-4673-48b5-9362-32781a1f9405\") " Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.221714 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0e132bd-4673-48b5-9362-32781a1f9405-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a0e132bd-4673-48b5-9362-32781a1f9405" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.226894 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0e132bd-4673-48b5-9362-32781a1f9405-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "a0e132bd-4673-48b5-9362-32781a1f9405" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.237305 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0e132bd-4673-48b5-9362-32781a1f9405-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "a0e132bd-4673-48b5-9362-32781a1f9405" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.237336 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a0e132bd-4673-48b5-9362-32781a1f9405" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.239004 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "a0e132bd-4673-48b5-9362-32781a1f9405" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.239989 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-kube-api-access-gbctd" (OuterVolumeSpecName: "kube-api-access-gbctd") pod "a0e132bd-4673-48b5-9362-32781a1f9405" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405"). InnerVolumeSpecName "kube-api-access-gbctd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.240349 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "a0e132bd-4673-48b5-9362-32781a1f9405" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.268398 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0e132bd-4673-48b5-9362-32781a1f9405-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "a0e132bd-4673-48b5-9362-32781a1f9405" (UID: "a0e132bd-4673-48b5-9362-32781a1f9405"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.321684 4650 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-registry-tls\") on node \"crc\" DevicePath \"\"" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.321737 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbctd\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-kube-api-access-gbctd\") on node \"crc\" DevicePath \"\"" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.321759 4650 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a0e132bd-4673-48b5-9362-32781a1f9405-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.321778 4650 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a0e132bd-4673-48b5-9362-32781a1f9405-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.321798 4650 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a0e132bd-4673-48b5-9362-32781a1f9405-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.321814 4650 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a0e132bd-4673-48b5-9362-32781a1f9405-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.321831 4650 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a0e132bd-4673-48b5-9362-32781a1f9405-registry-certificates\") on node \"crc\" DevicePath \"\"" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.390490 4650 generic.go:334] "Generic (PLEG): container finished" podID="a0e132bd-4673-48b5-9362-32781a1f9405" 
containerID="a996c6df9aa5b49d95289d73a27adf66ec6c55896ff2846651c514af9b21ca0d" exitCode=0 Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.390548 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" event={"ID":"a0e132bd-4673-48b5-9362-32781a1f9405","Type":"ContainerDied","Data":"a996c6df9aa5b49d95289d73a27adf66ec6c55896ff2846651c514af9b21ca0d"} Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.390585 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" event={"ID":"a0e132bd-4673-48b5-9362-32781a1f9405","Type":"ContainerDied","Data":"fe41ef17767a15d9d83e8618e1460b20fc2f62816cb4bcee0bbf23f44e0240dd"} Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.390601 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-crkwn" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.390612 4650 scope.go:117] "RemoveContainer" containerID="a996c6df9aa5b49d95289d73a27adf66ec6c55896ff2846651c514af9b21ca0d" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.415488 4650 scope.go:117] "RemoveContainer" containerID="a996c6df9aa5b49d95289d73a27adf66ec6c55896ff2846651c514af9b21ca0d" Feb 01 07:30:10 crc kubenswrapper[4650]: E0201 07:30:10.416356 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a996c6df9aa5b49d95289d73a27adf66ec6c55896ff2846651c514af9b21ca0d\": container with ID starting with a996c6df9aa5b49d95289d73a27adf66ec6c55896ff2846651c514af9b21ca0d not found: ID does not exist" containerID="a996c6df9aa5b49d95289d73a27adf66ec6c55896ff2846651c514af9b21ca0d" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.416421 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a996c6df9aa5b49d95289d73a27adf66ec6c55896ff2846651c514af9b21ca0d"} err="failed to get container status \"a996c6df9aa5b49d95289d73a27adf66ec6c55896ff2846651c514af9b21ca0d\": rpc error: code = NotFound desc = could not find container \"a996c6df9aa5b49d95289d73a27adf66ec6c55896ff2846651c514af9b21ca0d\": container with ID starting with a996c6df9aa5b49d95289d73a27adf66ec6c55896ff2846651c514af9b21ca0d not found: ID does not exist" Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.443213 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-crkwn"] Feb 01 07:30:10 crc kubenswrapper[4650]: I0201 07:30:10.453961 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-crkwn"] Feb 01 07:30:11 crc kubenswrapper[4650]: I0201 07:30:11.979143 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0e132bd-4673-48b5-9362-32781a1f9405" path="/var/lib/kubelet/pods/a0e132bd-4673-48b5-9362-32781a1f9405/volumes" Feb 01 07:32:07 crc kubenswrapper[4650]: I0201 07:32:07.161928 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:32:07 crc kubenswrapper[4650]: I0201 07:32:07.162853 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" 
podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:32:37 crc kubenswrapper[4650]: I0201 07:32:37.161234 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:32:37 crc kubenswrapper[4650]: I0201 07:32:37.161852 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:33:07 crc kubenswrapper[4650]: I0201 07:33:07.161550 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:33:07 crc kubenswrapper[4650]: I0201 07:33:07.162181 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:33:07 crc kubenswrapper[4650]: I0201 07:33:07.162238 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:33:07 crc kubenswrapper[4650]: I0201 07:33:07.163128 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e3b794505f123da84dff50f9a8a52e0b394a3a5d8569c0bb7517422e3f2965d9"} pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 01 07:33:07 crc kubenswrapper[4650]: I0201 07:33:07.163184 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" containerID="cri-o://e3b794505f123da84dff50f9a8a52e0b394a3a5d8569c0bb7517422e3f2965d9" gracePeriod=600 Feb 01 07:33:07 crc kubenswrapper[4650]: I0201 07:33:07.682909 4650 generic.go:334] "Generic (PLEG): container finished" podID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerID="e3b794505f123da84dff50f9a8a52e0b394a3a5d8569c0bb7517422e3f2965d9" exitCode=0 Feb 01 07:33:07 crc kubenswrapper[4650]: I0201 07:33:07.683000 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerDied","Data":"e3b794505f123da84dff50f9a8a52e0b394a3a5d8569c0bb7517422e3f2965d9"} Feb 01 07:33:07 crc kubenswrapper[4650]: I0201 07:33:07.683460 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" 
event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"7e65f3cb8796ac73cb10f7e5fd38e53569d05a3301373f4ee87f64447301307a"} Feb 01 07:33:07 crc kubenswrapper[4650]: I0201 07:33:07.683489 4650 scope.go:117] "RemoveContainer" containerID="db279aeb24995ec5143fa01137b45dc7b7c1ab6084221190d10c8193ed14bc2e" Feb 01 07:33:59 crc kubenswrapper[4650]: I0201 07:33:59.996447 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-wg2pn"] Feb 01 07:33:59 crc kubenswrapper[4650]: E0201 07:33:59.997082 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0e132bd-4673-48b5-9362-32781a1f9405" containerName="registry" Feb 01 07:33:59 crc kubenswrapper[4650]: I0201 07:33:59.997097 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0e132bd-4673-48b5-9362-32781a1f9405" containerName="registry" Feb 01 07:33:59 crc kubenswrapper[4650]: E0201 07:33:59.997116 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7091e599-b67c-44d8-945c-329bac31dd6d" containerName="collect-profiles" Feb 01 07:33:59 crc kubenswrapper[4650]: I0201 07:33:59.997121 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="7091e599-b67c-44d8-945c-329bac31dd6d" containerName="collect-profiles" Feb 01 07:33:59 crc kubenswrapper[4650]: I0201 07:33:59.997220 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="7091e599-b67c-44d8-945c-329bac31dd6d" containerName="collect-profiles" Feb 01 07:33:59 crc kubenswrapper[4650]: I0201 07:33:59.997230 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0e132bd-4673-48b5-9362-32781a1f9405" containerName="registry" Feb 01 07:33:59 crc kubenswrapper[4650]: I0201 07:33:59.997564 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-wg2pn" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.001543 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.001543 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.007275 4650 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-j8765" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.012928 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-wg2pn"] Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.018084 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-p2tvq"] Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.018773 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-p2tvq" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.030174 4650 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-wtsc7" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.036364 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-zs6dr"] Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.036998 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-zs6dr" Feb 01 07:34:00 crc kubenswrapper[4650]: W0201 07:34:00.040473 4650 reflector.go:561] object-"cert-manager"/"cert-manager-webhook-dockercfg-pb8ck": failed to list *v1.Secret: secrets "cert-manager-webhook-dockercfg-pb8ck" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "cert-manager": no relationship found between node 'crc' and this object Feb 01 07:34:00 crc kubenswrapper[4650]: E0201 07:34:00.040511 4650 reflector.go:158] "Unhandled Error" err="object-\"cert-manager\"/\"cert-manager-webhook-dockercfg-pb8ck\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"cert-manager-webhook-dockercfg-pb8ck\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"cert-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.041261 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-p2tvq"] Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.060848 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-zs6dr"] Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.089168 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5f5ph\" (UniqueName: \"kubernetes.io/projected/bf5de50c-0ff3-45fa-9888-13446bb0a4ab-kube-api-access-5f5ph\") pod \"cert-manager-cainjector-cf98fcc89-wg2pn\" (UID: \"bf5de50c-0ff3-45fa-9888-13446bb0a4ab\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-wg2pn" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.190658 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flwdk\" (UniqueName: \"kubernetes.io/projected/d4d25501-9155-4295-9f13-aeb46e745f85-kube-api-access-flwdk\") pod \"cert-manager-858654f9db-p2tvq\" (UID: \"d4d25501-9155-4295-9f13-aeb46e745f85\") " pod="cert-manager/cert-manager-858654f9db-p2tvq" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.190785 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5f5ph\" (UniqueName: \"kubernetes.io/projected/bf5de50c-0ff3-45fa-9888-13446bb0a4ab-kube-api-access-5f5ph\") pod \"cert-manager-cainjector-cf98fcc89-wg2pn\" (UID: \"bf5de50c-0ff3-45fa-9888-13446bb0a4ab\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-wg2pn" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.190965 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55q4b\" (UniqueName: \"kubernetes.io/projected/ef362b17-83bc-4543-839b-1451ee91c2c2-kube-api-access-55q4b\") pod \"cert-manager-webhook-687f57d79b-zs6dr\" (UID: \"ef362b17-83bc-4543-839b-1451ee91c2c2\") " pod="cert-manager/cert-manager-webhook-687f57d79b-zs6dr" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.208848 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5f5ph\" (UniqueName: \"kubernetes.io/projected/bf5de50c-0ff3-45fa-9888-13446bb0a4ab-kube-api-access-5f5ph\") pod \"cert-manager-cainjector-cf98fcc89-wg2pn\" (UID: \"bf5de50c-0ff3-45fa-9888-13446bb0a4ab\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-wg2pn" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.294266 4650 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flwdk\" (UniqueName: \"kubernetes.io/projected/d4d25501-9155-4295-9f13-aeb46e745f85-kube-api-access-flwdk\") pod \"cert-manager-858654f9db-p2tvq\" (UID: \"d4d25501-9155-4295-9f13-aeb46e745f85\") " pod="cert-manager/cert-manager-858654f9db-p2tvq" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.294343 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55q4b\" (UniqueName: \"kubernetes.io/projected/ef362b17-83bc-4543-839b-1451ee91c2c2-kube-api-access-55q4b\") pod \"cert-manager-webhook-687f57d79b-zs6dr\" (UID: \"ef362b17-83bc-4543-839b-1451ee91c2c2\") " pod="cert-manager/cert-manager-webhook-687f57d79b-zs6dr" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.313525 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-wg2pn" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.332926 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flwdk\" (UniqueName: \"kubernetes.io/projected/d4d25501-9155-4295-9f13-aeb46e745f85-kube-api-access-flwdk\") pod \"cert-manager-858654f9db-p2tvq\" (UID: \"d4d25501-9155-4295-9f13-aeb46e745f85\") " pod="cert-manager/cert-manager-858654f9db-p2tvq" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.334744 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55q4b\" (UniqueName: \"kubernetes.io/projected/ef362b17-83bc-4543-839b-1451ee91c2c2-kube-api-access-55q4b\") pod \"cert-manager-webhook-687f57d79b-zs6dr\" (UID: \"ef362b17-83bc-4543-839b-1451ee91c2c2\") " pod="cert-manager/cert-manager-webhook-687f57d79b-zs6dr" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.539504 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-wg2pn"] Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.563893 4650 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.629896 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-p2tvq" Feb 01 07:34:00 crc kubenswrapper[4650]: I0201 07:34:00.816327 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-p2tvq"] Feb 01 07:34:00 crc kubenswrapper[4650]: W0201 07:34:00.824174 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4d25501_9155_4295_9f13_aeb46e745f85.slice/crio-a2eeb7ff75b4329f7cdd354d56aa552907dc0a0ef968d23a02deb9d9a7294faa WatchSource:0}: Error finding container a2eeb7ff75b4329f7cdd354d56aa552907dc0a0ef968d23a02deb9d9a7294faa: Status 404 returned error can't find the container with id a2eeb7ff75b4329f7cdd354d56aa552907dc0a0ef968d23a02deb9d9a7294faa Feb 01 07:34:01 crc kubenswrapper[4650]: I0201 07:34:01.055594 4650 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-pb8ck" Feb 01 07:34:01 crc kubenswrapper[4650]: I0201 07:34:01.059715 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-zs6dr" Feb 01 07:34:01 crc kubenswrapper[4650]: I0201 07:34:01.091216 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-wg2pn" event={"ID":"bf5de50c-0ff3-45fa-9888-13446bb0a4ab","Type":"ContainerStarted","Data":"8fbeca4dbd4cb01bf191bddf717b74d59f9fecd58b2da0ef1b3107687b2ce19b"} Feb 01 07:34:01 crc kubenswrapper[4650]: I0201 07:34:01.093693 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-p2tvq" event={"ID":"d4d25501-9155-4295-9f13-aeb46e745f85","Type":"ContainerStarted","Data":"a2eeb7ff75b4329f7cdd354d56aa552907dc0a0ef968d23a02deb9d9a7294faa"} Feb 01 07:34:01 crc kubenswrapper[4650]: I0201 07:34:01.294681 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-zs6dr"] Feb 01 07:34:02 crc kubenswrapper[4650]: I0201 07:34:02.115015 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-zs6dr" event={"ID":"ef362b17-83bc-4543-839b-1451ee91c2c2","Type":"ContainerStarted","Data":"d78b64378050866be7a350800fccfd3ed16455fc2145f91e82a3a9f319576ceb"} Feb 01 07:34:05 crc kubenswrapper[4650]: I0201 07:34:05.138443 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-zs6dr" event={"ID":"ef362b17-83bc-4543-839b-1451ee91c2c2","Type":"ContainerStarted","Data":"b57e972130735d4c2162378cd913bc06b441b73199866f04d61889671733c2b0"} Feb 01 07:34:05 crc kubenswrapper[4650]: I0201 07:34:05.139685 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-zs6dr" Feb 01 07:34:05 crc kubenswrapper[4650]: I0201 07:34:05.141585 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-wg2pn" event={"ID":"bf5de50c-0ff3-45fa-9888-13446bb0a4ab","Type":"ContainerStarted","Data":"cedd4d4946c51a9a7d77f0d1b59d3c7ad7ad8c5f326d99e3fb2fc512bec8c5c6"} Feb 01 07:34:05 crc kubenswrapper[4650]: I0201 07:34:05.143770 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-p2tvq" event={"ID":"d4d25501-9155-4295-9f13-aeb46e745f85","Type":"ContainerStarted","Data":"d00e607295caf15bddf557dee105f58562a7bf5ca5c5d11e83e9faeab5173859"} Feb 01 07:34:05 crc kubenswrapper[4650]: I0201 07:34:05.158195 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-zs6dr" podStartSLOduration=1.957074867 podStartE2EDuration="5.158175885s" podCreationTimestamp="2026-02-01 07:34:00 +0000 UTC" firstStartedPulling="2026-02-01 07:34:01.337243384 +0000 UTC m=+640.060341629" lastFinishedPulling="2026-02-01 07:34:04.538344392 +0000 UTC m=+643.261442647" observedRunningTime="2026-02-01 07:34:05.155571205 +0000 UTC m=+643.878669460" watchObservedRunningTime="2026-02-01 07:34:05.158175885 +0000 UTC m=+643.881274140" Feb 01 07:34:05 crc kubenswrapper[4650]: I0201 07:34:05.175730 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-wg2pn" podStartSLOduration=2.3040566670000002 podStartE2EDuration="6.175711044s" podCreationTimestamp="2026-02-01 07:33:59 +0000 UTC" firstStartedPulling="2026-02-01 07:34:00.56362468 +0000 UTC m=+639.286722925" lastFinishedPulling="2026-02-01 07:34:04.435279027 +0000 UTC m=+643.158377302" observedRunningTime="2026-02-01 
07:34:05.171797899 +0000 UTC m=+643.894896154" watchObservedRunningTime="2026-02-01 07:34:05.175711044 +0000 UTC m=+643.898809299" Feb 01 07:34:09 crc kubenswrapper[4650]: I0201 07:34:09.780954 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-p2tvq" podStartSLOduration=7.175607899 podStartE2EDuration="10.780925154s" podCreationTimestamp="2026-02-01 07:33:59 +0000 UTC" firstStartedPulling="2026-02-01 07:34:00.826256292 +0000 UTC m=+639.549354537" lastFinishedPulling="2026-02-01 07:34:04.431573507 +0000 UTC m=+643.154671792" observedRunningTime="2026-02-01 07:34:05.207252207 +0000 UTC m=+643.930350492" watchObservedRunningTime="2026-02-01 07:34:09.780925154 +0000 UTC m=+648.504023439" Feb 01 07:34:09 crc kubenswrapper[4650]: I0201 07:34:09.794498 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hm5cs"] Feb 01 07:34:09 crc kubenswrapper[4650]: I0201 07:34:09.795364 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovn-controller" containerID="cri-o://8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1" gracePeriod=30 Feb 01 07:34:09 crc kubenswrapper[4650]: I0201 07:34:09.795390 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="nbdb" containerID="cri-o://7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba" gracePeriod=30 Feb 01 07:34:09 crc kubenswrapper[4650]: I0201 07:34:09.795590 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="northd" containerID="cri-o://b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e" gracePeriod=30 Feb 01 07:34:09 crc kubenswrapper[4650]: I0201 07:34:09.795698 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad" gracePeriod=30 Feb 01 07:34:09 crc kubenswrapper[4650]: I0201 07:34:09.795822 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="kube-rbac-proxy-node" containerID="cri-o://1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5" gracePeriod=30 Feb 01 07:34:09 crc kubenswrapper[4650]: I0201 07:34:09.795910 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovn-acl-logging" containerID="cri-o://e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5" gracePeriod=30 Feb 01 07:34:09 crc kubenswrapper[4650]: I0201 07:34:09.796097 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="sbdb" containerID="cri-o://4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004" gracePeriod=30 Feb 01 07:34:09 crc kubenswrapper[4650]: I0201 07:34:09.882156 
4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" containerID="cri-o://17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e" gracePeriod=30 Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.152762 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/3.log" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.156176 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovn-acl-logging/0.log" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.156636 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovn-controller/0.log" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.157006 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.194317 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovnkube-controller/3.log" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.206702 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovn-acl-logging/0.log" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.207909 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hm5cs_ef0e87ea-6edd-4e89-a09b-01f62f763ba1/ovn-controller/0.log" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209076 4650 generic.go:334] "Generic (PLEG): container finished" podID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerID="17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e" exitCode=0 Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209099 4650 generic.go:334] "Generic (PLEG): container finished" podID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerID="4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004" exitCode=0 Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209107 4650 generic.go:334] "Generic (PLEG): container finished" podID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerID="7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba" exitCode=0 Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209115 4650 generic.go:334] "Generic (PLEG): container finished" podID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerID="b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e" exitCode=0 Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209122 4650 generic.go:334] "Generic (PLEG): container finished" podID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerID="ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad" exitCode=0 Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209129 4650 generic.go:334] "Generic (PLEG): container finished" podID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerID="1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5" exitCode=0 Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209136 4650 generic.go:334] "Generic (PLEG): container finished" 
podID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerID="e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5" exitCode=143 Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209144 4650 generic.go:334] "Generic (PLEG): container finished" podID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerID="8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1" exitCode=143 Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209182 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209205 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209216 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209225 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209233 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209242 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209253 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209262 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209267 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209272 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209277 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad"} Feb 01 
07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209282 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209288 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209293 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209299 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209306 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209313 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209320 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209325 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209331 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209336 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209342 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209347 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209352 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209357 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1"} Feb 01 
07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209362 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209369 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209378 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209384 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209389 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209394 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209399 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209403 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209408 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209413 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209418 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209422 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209429 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" event={"ID":"ef0e87ea-6edd-4e89-a09b-01f62f763ba1","Type":"ContainerDied","Data":"86946137a5915665e78dfe221b65ae5970b1f8c9b74fda5a719c222c704f43d3"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209437 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209444 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209449 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209454 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209459 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209464 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209469 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209473 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209478 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209483 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209496 4650 scope.go:117] "RemoveContainer" containerID="17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.209616 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hm5cs" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.219066 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-k6xtw_e408ebb2-07fc-4317-92d4-1316ece830fb/kube-multus/2.log" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.219632 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-k6xtw_e408ebb2-07fc-4317-92d4-1316ece830fb/kube-multus/1.log" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.219664 4650 generic.go:334] "Generic (PLEG): container finished" podID="e408ebb2-07fc-4317-92d4-1316ece830fb" containerID="18d71ea0d1e0ca8b54e4bd06f8df0d55bbe23fe80bbaf025dcf0468ad1399f99" exitCode=2 Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.219724 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-k6xtw" event={"ID":"e408ebb2-07fc-4317-92d4-1316ece830fb","Type":"ContainerDied","Data":"18d71ea0d1e0ca8b54e4bd06f8df0d55bbe23fe80bbaf025dcf0468ad1399f99"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.219748 4650 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c3bba94e2fc70e50b46639439a12b34db68a19e8dd937f5f2cdad28f0a7ac012"} Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.220493 4650 scope.go:117] "RemoveContainer" containerID="18d71ea0d1e0ca8b54e4bd06f8df0d55bbe23fe80bbaf025dcf0468ad1399f99" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.221161 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-k6xtw_openshift-multus(e408ebb2-07fc-4317-92d4-1316ece830fb)\"" pod="openshift-multus/multus-k6xtw" podUID="e408ebb2-07fc-4317-92d4-1316ece830fb" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.231495 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-xq7xx"] Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.231728 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="nbdb" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.231752 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="nbdb" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.231764 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="northd" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.231772 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="northd" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.231785 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovn-acl-logging" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.231793 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovn-acl-logging" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.231802 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovn-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.231810 4650 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovn-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.231820 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.231827 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.231836 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="kubecfg-setup" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.231844 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="kubecfg-setup" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.231854 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="kube-rbac-proxy-node" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.231862 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="kube-rbac-proxy-node" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.231876 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.231884 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.231894 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="sbdb" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.231901 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="sbdb" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.231912 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.231919 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.231929 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.231936 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.231948 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="kube-rbac-proxy-ovn-metrics" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.231955 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="kube-rbac-proxy-ovn-metrics" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.231979 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 
07:34:10.231986 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.232117 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="kube-rbac-proxy-ovn-metrics" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.232131 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.232138 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.232148 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovn-acl-logging" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.232156 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="nbdb" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.232165 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="northd" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.232174 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="sbdb" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.232181 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.232189 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.232197 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovn-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.232203 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="kube-rbac-proxy-node" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.232360 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" containerName="ovnkube-controller" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.234384 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.240416 4650 scope.go:117] "RemoveContainer" containerID="30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.266308 4650 scope.go:117] "RemoveContainer" containerID="4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.281825 4650 scope.go:117] "RemoveContainer" containerID="7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.299245 4650 scope.go:117] "RemoveContainer" containerID="b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.316361 4650 scope.go:117] "RemoveContainer" containerID="ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.337702 4650 scope.go:117] "RemoveContainer" containerID="1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.352339 4650 scope.go:117] "RemoveContainer" containerID="e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356433 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-log-socket\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356474 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-run-ovn-kubernetes\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356501 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-systemd\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356532 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-node-log\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356552 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-etc-openvswitch\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356548 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-log-socket" (OuterVolumeSpecName: "log-socket") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "log-socket". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356580 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovn-node-metrics-cert\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356571 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356600 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-cni-bin\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356609 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-node-log" (OuterVolumeSpecName: "node-log") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356619 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-run-netns\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356638 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356663 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovnkube-config\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356677 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-openvswitch\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356695 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-env-overrides\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356731 4650 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvhzk\" (UniqueName: \"kubernetes.io/projected/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-kube-api-access-nvhzk\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356745 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-ovn\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357079 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357121 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357143 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357181 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357218 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357240 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357286 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.356762 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-slash\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357547 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-cni-netd\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357548 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357586 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-systemd-units\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357590 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-slash" (OuterVolumeSpecName: "host-slash") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357602 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-kubelet\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357622 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-var-lib-openvswitch\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357696 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovnkube-script-lib\") pod \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\" (UID: \"ef0e87ea-6edd-4e89-a09b-01f62f763ba1\") " Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357623 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357641 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357761 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-etc-openvswitch\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357654 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "host-kubelet". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357782 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/52d07d17-a860-4303-9048-d4da8a875ad3-ovn-node-metrics-cert\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357663 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.357803 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-kubelet\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.358195 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.358573 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-run-ovn-kubernetes\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.358618 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/52d07d17-a860-4303-9048-d4da8a875ad3-ovnkube-config\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.358649 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-cni-netd\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.358675 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-run-netns\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.358699 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-run-openvswitch\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.358769 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-run-ovn\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.358993 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-systemd-units\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.359067 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-var-lib-openvswitch\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.359195 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-node-log\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.359299 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/52d07d17-a860-4303-9048-d4da8a875ad3-ovnkube-script-lib\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.359366 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzc48\" (UniqueName: \"kubernetes.io/projected/52d07d17-a860-4303-9048-d4da8a875ad3-kube-api-access-hzc48\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.359441 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.359523 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/52d07d17-a860-4303-9048-d4da8a875ad3-env-overrides\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 
crc kubenswrapper[4650]: I0201 07:34:10.359649 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-run-systemd\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.359734 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-slash\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.359845 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-log-socket\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.359999 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-cni-bin\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.360305 4650 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.360409 4650 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovnkube-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.360506 4650 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-env-overrides\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.360601 4650 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.360676 4650 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.360752 4650 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-slash\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.360831 4650 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-cni-netd\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.360901 4650 
reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-systemd-units\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.360962 4650 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-kubelet\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.361039 4650 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.361104 4650 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.361170 4650 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-log-socket\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.361277 4650 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.361346 4650 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-node-log\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.361411 4650 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.361480 4650 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-cni-bin\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.361544 4650 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-host-run-netns\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.362211 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-kube-api-access-nvhzk" (OuterVolumeSpecName: "kube-api-access-nvhzk") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "kube-api-access-nvhzk". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.364478 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.369264 4650 scope.go:117] "RemoveContainer" containerID="8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.372287 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "ef0e87ea-6edd-4e89-a09b-01f62f763ba1" (UID: "ef0e87ea-6edd-4e89-a09b-01f62f763ba1"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.382264 4650 scope.go:117] "RemoveContainer" containerID="31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.395112 4650 scope.go:117] "RemoveContainer" containerID="17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.395501 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e\": container with ID starting with 17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e not found: ID does not exist" containerID="17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.395548 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e"} err="failed to get container status \"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e\": rpc error: code = NotFound desc = could not find container \"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e\": container with ID starting with 17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.395576 4650 scope.go:117] "RemoveContainer" containerID="30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.395920 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\": container with ID starting with 30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28 not found: ID does not exist" containerID="30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.395940 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28"} err="failed to get container status \"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\": rpc error: code = NotFound desc = could not find container \"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\": container with ID starting with 30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.395951 4650 scope.go:117] "RemoveContainer" containerID="4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 
07:34:10.396452 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\": container with ID starting with 4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004 not found: ID does not exist" containerID="4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.396502 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004"} err="failed to get container status \"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\": rpc error: code = NotFound desc = could not find container \"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\": container with ID starting with 4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.396531 4650 scope.go:117] "RemoveContainer" containerID="7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.396812 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\": container with ID starting with 7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba not found: ID does not exist" containerID="7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.396849 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba"} err="failed to get container status \"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\": rpc error: code = NotFound desc = could not find container \"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\": container with ID starting with 7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.396874 4650 scope.go:117] "RemoveContainer" containerID="b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.397196 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\": container with ID starting with b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e not found: ID does not exist" containerID="b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.397221 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e"} err="failed to get container status \"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\": rpc error: code = NotFound desc = could not find container \"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\": container with ID starting with b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.397236 4650 
scope.go:117] "RemoveContainer" containerID="ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.397474 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\": container with ID starting with ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad not found: ID does not exist" containerID="ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.397502 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad"} err="failed to get container status \"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\": rpc error: code = NotFound desc = could not find container \"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\": container with ID starting with ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.397519 4650 scope.go:117] "RemoveContainer" containerID="1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.397741 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\": container with ID starting with 1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5 not found: ID does not exist" containerID="1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.397764 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5"} err="failed to get container status \"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\": rpc error: code = NotFound desc = could not find container \"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\": container with ID starting with 1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.397777 4650 scope.go:117] "RemoveContainer" containerID="e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.398051 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\": container with ID starting with e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5 not found: ID does not exist" containerID="e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.398083 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5"} err="failed to get container status \"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\": rpc error: code = NotFound desc = could not find container \"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\": container with ID starting with 
e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.398101 4650 scope.go:117] "RemoveContainer" containerID="8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.398364 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\": container with ID starting with 8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1 not found: ID does not exist" containerID="8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.398392 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1"} err="failed to get container status \"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\": rpc error: code = NotFound desc = could not find container \"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\": container with ID starting with 8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.398407 4650 scope.go:117] "RemoveContainer" containerID="31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0" Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.398640 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\": container with ID starting with 31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0 not found: ID does not exist" containerID="31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.398666 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0"} err="failed to get container status \"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\": rpc error: code = NotFound desc = could not find container \"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\": container with ID starting with 31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.398683 4650 scope.go:117] "RemoveContainer" containerID="17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.398912 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e"} err="failed to get container status \"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e\": rpc error: code = NotFound desc = could not find container \"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e\": container with ID starting with 17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.398932 4650 scope.go:117] "RemoveContainer" containerID="30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28" Feb 01 07:34:10 crc 
kubenswrapper[4650]: I0201 07:34:10.399189 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28"} err="failed to get container status \"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\": rpc error: code = NotFound desc = could not find container \"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\": container with ID starting with 30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.399210 4650 scope.go:117] "RemoveContainer" containerID="4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.399530 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004"} err="failed to get container status \"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\": rpc error: code = NotFound desc = could not find container \"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\": container with ID starting with 4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.399555 4650 scope.go:117] "RemoveContainer" containerID="7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.399804 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba"} err="failed to get container status \"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\": rpc error: code = NotFound desc = could not find container \"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\": container with ID starting with 7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.399830 4650 scope.go:117] "RemoveContainer" containerID="b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.400082 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e"} err="failed to get container status \"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\": rpc error: code = NotFound desc = could not find container \"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\": container with ID starting with b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.400101 4650 scope.go:117] "RemoveContainer" containerID="ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.400323 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad"} err="failed to get container status \"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\": rpc error: code = NotFound desc = could not find container \"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\": container with ID 
starting with ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.400342 4650 scope.go:117] "RemoveContainer" containerID="1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.400521 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5"} err="failed to get container status \"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\": rpc error: code = NotFound desc = could not find container \"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\": container with ID starting with 1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.400536 4650 scope.go:117] "RemoveContainer" containerID="e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.400741 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5"} err="failed to get container status \"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\": rpc error: code = NotFound desc = could not find container \"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\": container with ID starting with e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.400765 4650 scope.go:117] "RemoveContainer" containerID="8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.400967 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1"} err="failed to get container status \"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\": rpc error: code = NotFound desc = could not find container \"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\": container with ID starting with 8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.400985 4650 scope.go:117] "RemoveContainer" containerID="31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.401167 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0"} err="failed to get container status \"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\": rpc error: code = NotFound desc = could not find container \"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\": container with ID starting with 31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.401184 4650 scope.go:117] "RemoveContainer" containerID="17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.401321 4650 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e"} err="failed to get container status \"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e\": rpc error: code = NotFound desc = could not find container \"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e\": container with ID starting with 17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.401334 4650 scope.go:117] "RemoveContainer" containerID="30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.401484 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28"} err="failed to get container status \"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\": rpc error: code = NotFound desc = could not find container \"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\": container with ID starting with 30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.401501 4650 scope.go:117] "RemoveContainer" containerID="4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.401670 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004"} err="failed to get container status \"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\": rpc error: code = NotFound desc = could not find container \"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\": container with ID starting with 4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.401686 4650 scope.go:117] "RemoveContainer" containerID="7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.401822 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba"} err="failed to get container status \"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\": rpc error: code = NotFound desc = could not find container \"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\": container with ID starting with 7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.401838 4650 scope.go:117] "RemoveContainer" containerID="b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.401982 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e"} err="failed to get container status \"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\": rpc error: code = NotFound desc = could not find container \"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\": container with ID starting with b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e not found: ID does not exist" Feb 
01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.401997 4650 scope.go:117] "RemoveContainer" containerID="ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.402248 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad"} err="failed to get container status \"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\": rpc error: code = NotFound desc = could not find container \"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\": container with ID starting with ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.402265 4650 scope.go:117] "RemoveContainer" containerID="1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.402419 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5"} err="failed to get container status \"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\": rpc error: code = NotFound desc = could not find container \"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\": container with ID starting with 1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.402435 4650 scope.go:117] "RemoveContainer" containerID="e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.402584 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5"} err="failed to get container status \"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\": rpc error: code = NotFound desc = could not find container \"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\": container with ID starting with e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.402600 4650 scope.go:117] "RemoveContainer" containerID="8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.402787 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1"} err="failed to get container status \"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\": rpc error: code = NotFound desc = could not find container \"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\": container with ID starting with 8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.402803 4650 scope.go:117] "RemoveContainer" containerID="31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.402945 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0"} err="failed to get container status 
\"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\": rpc error: code = NotFound desc = could not find container \"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\": container with ID starting with 31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.402962 4650 scope.go:117] "RemoveContainer" containerID="17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.403124 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e"} err="failed to get container status \"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e\": rpc error: code = NotFound desc = could not find container \"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e\": container with ID starting with 17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.403140 4650 scope.go:117] "RemoveContainer" containerID="30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.403293 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28"} err="failed to get container status \"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\": rpc error: code = NotFound desc = could not find container \"30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28\": container with ID starting with 30a6bae33be01cedb0bc332d252100cc0e075d25bec23e5ff3e6a41dba3a5b28 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.403309 4650 scope.go:117] "RemoveContainer" containerID="4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.403504 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004"} err="failed to get container status \"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\": rpc error: code = NotFound desc = could not find container \"4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004\": container with ID starting with 4f687c29e4dae215dde1a7588046b0dab1720bb87bab04935ef1a09cb6391004 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.403520 4650 scope.go:117] "RemoveContainer" containerID="7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.403657 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba"} err="failed to get container status \"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\": rpc error: code = NotFound desc = could not find container \"7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba\": container with ID starting with 7d5ed0d7b203ab8dc0041def8d2c7dad8b6017b52076b84c7b7793b53a5222ba not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.403681 4650 scope.go:117] "RemoveContainer" 
containerID="b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.403829 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e"} err="failed to get container status \"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\": rpc error: code = NotFound desc = could not find container \"b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e\": container with ID starting with b145be2935d71d65536e744a6842c1da4cb67ea6d8b2a1dba6c5e6f4348f754e not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.403848 4650 scope.go:117] "RemoveContainer" containerID="ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.403996 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad"} err="failed to get container status \"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\": rpc error: code = NotFound desc = could not find container \"ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad\": container with ID starting with ca03abaaef41bbc6f4072d51eb9efd359473befd8dc15eddb7237c4e954cbcad not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.404013 4650 scope.go:117] "RemoveContainer" containerID="1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.404228 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5"} err="failed to get container status \"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\": rpc error: code = NotFound desc = could not find container \"1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5\": container with ID starting with 1416325fc6793d45b87617ea8563192f712dfba93d448237807d42507a299ba5 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.404244 4650 scope.go:117] "RemoveContainer" containerID="e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.404421 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5"} err="failed to get container status \"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\": rpc error: code = NotFound desc = could not find container \"e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5\": container with ID starting with e1b061b9f4b472c29a56b611cddb06f7fe821ea5b39cebc0a8d5a92d3622f7e5 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.404437 4650 scope.go:117] "RemoveContainer" containerID="8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.404593 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1"} err="failed to get container status \"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\": rpc error: code = NotFound desc = could not find 
container \"8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1\": container with ID starting with 8bf2501d717a37cd686df5395d6b439fc341858fff38384e76622c141633a2c1 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.404605 4650 scope.go:117] "RemoveContainer" containerID="31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.404946 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0"} err="failed to get container status \"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\": rpc error: code = NotFound desc = could not find container \"31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0\": container with ID starting with 31b3af71e55aa77b426b6c9dcafa68c076a68f0151f9a5413adb47fd71c0d3a0 not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.404966 4650 scope.go:117] "RemoveContainer" containerID="17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.405227 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e"} err="failed to get container status \"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e\": rpc error: code = NotFound desc = could not find container \"17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e\": container with ID starting with 17c34192f0af8b853cb5ff88b2cc96fa8b64d2028baed863a4e821905c611f3e not found: ID does not exist" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463047 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-kubelet\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463105 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-run-ovn-kubernetes\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463131 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/52d07d17-a860-4303-9048-d4da8a875ad3-ovnkube-config\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463148 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-cni-netd\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463164 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-run-netns\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463179 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-run-openvswitch\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463175 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-kubelet\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463204 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-run-ovn\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463223 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-systemd-units\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463237 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-var-lib-openvswitch\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463248 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-run-ovn-kubernetes\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463254 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-node-log\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463295 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-node-log\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463302 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/52d07d17-a860-4303-9048-d4da8a875ad3-ovnkube-script-lib\") pod \"ovnkube-node-xq7xx\" (UID: 
\"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463321 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzc48\" (UniqueName: \"kubernetes.io/projected/52d07d17-a860-4303-9048-d4da8a875ad3-kube-api-access-hzc48\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463337 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-cni-netd\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463350 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463359 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-run-netns\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463376 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/52d07d17-a860-4303-9048-d4da8a875ad3-env-overrides\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463380 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-run-openvswitch\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463400 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-run-ovn\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463424 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-systemd-units\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463427 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-run-systemd\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" 
Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463445 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-slash\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463465 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463473 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-log-socket\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463532 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-cni-bin\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463572 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-etc-openvswitch\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463589 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/52d07d17-a860-4303-9048-d4da8a875ad3-ovn-node-metrics-cert\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463641 4650 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-run-systemd\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463652 4650 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463663 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvhzk\" (UniqueName: \"kubernetes.io/projected/ef0e87ea-6edd-4e89-a09b-01f62f763ba1-kube-api-access-nvhzk\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.464018 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/52d07d17-a860-4303-9048-d4da8a875ad3-ovnkube-config\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 
07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.463445 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-var-lib-openvswitch\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.464171 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/52d07d17-a860-4303-9048-d4da8a875ad3-ovnkube-script-lib\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.464211 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-run-systemd\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.464239 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-slash\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.464260 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-host-cni-bin\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.464283 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-log-socket\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.464303 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/52d07d17-a860-4303-9048-d4da8a875ad3-etc-openvswitch\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.465091 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/52d07d17-a860-4303-9048-d4da8a875ad3-env-overrides\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.466596 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/52d07d17-a860-4303-9048-d4da8a875ad3-ovn-node-metrics-cert\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.485786 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzc48\" (UniqueName: 
\"kubernetes.io/projected/52d07d17-a860-4303-9048-d4da8a875ad3-kube-api-access-hzc48\") pod \"ovnkube-node-xq7xx\" (UID: \"52d07d17-a860-4303-9048-d4da8a875ad3\") " pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.547795 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.567223 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hm5cs"] Feb 01 07:34:10 crc kubenswrapper[4650]: I0201 07:34:10.574782 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hm5cs"] Feb 01 07:34:10 crc kubenswrapper[4650]: E0201 07:34:10.754044 4650 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52d07d17_a860_4303_9048_d4da8a875ad3.slice/crio-conmon-210a49671fb7b940729642dfb0e809f1598dcda7b4041f2e048665164f64c4dc.scope\": RecentStats: unable to find data in memory cache]" Feb 01 07:34:11 crc kubenswrapper[4650]: I0201 07:34:11.065100 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-zs6dr" Feb 01 07:34:11 crc kubenswrapper[4650]: I0201 07:34:11.229211 4650 generic.go:334] "Generic (PLEG): container finished" podID="52d07d17-a860-4303-9048-d4da8a875ad3" containerID="210a49671fb7b940729642dfb0e809f1598dcda7b4041f2e048665164f64c4dc" exitCode=0 Feb 01 07:34:11 crc kubenswrapper[4650]: I0201 07:34:11.229268 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" event={"ID":"52d07d17-a860-4303-9048-d4da8a875ad3","Type":"ContainerDied","Data":"210a49671fb7b940729642dfb0e809f1598dcda7b4041f2e048665164f64c4dc"} Feb 01 07:34:11 crc kubenswrapper[4650]: I0201 07:34:11.229803 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" event={"ID":"52d07d17-a860-4303-9048-d4da8a875ad3","Type":"ContainerStarted","Data":"f19de98309ebe757f0186c47acb9327169c2f766fce2eea3ef48c3d425507c3e"} Feb 01 07:34:11 crc kubenswrapper[4650]: I0201 07:34:11.976659 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef0e87ea-6edd-4e89-a09b-01f62f763ba1" path="/var/lib/kubelet/pods/ef0e87ea-6edd-4e89-a09b-01f62f763ba1/volumes" Feb 01 07:34:12 crc kubenswrapper[4650]: I0201 07:34:12.240744 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" event={"ID":"52d07d17-a860-4303-9048-d4da8a875ad3","Type":"ContainerStarted","Data":"097b9ebf43b4ddf4f18ba8679cb1cf35d86d0ff9db4736156efc2663f968b022"} Feb 01 07:34:12 crc kubenswrapper[4650]: I0201 07:34:12.240789 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" event={"ID":"52d07d17-a860-4303-9048-d4da8a875ad3","Type":"ContainerStarted","Data":"21ee70496a9f9a10e7d7befb3b921909053ba982aca014d42b767d7e1a2d8016"} Feb 01 07:34:12 crc kubenswrapper[4650]: I0201 07:34:12.240802 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" event={"ID":"52d07d17-a860-4303-9048-d4da8a875ad3","Type":"ContainerStarted","Data":"1288738be36b973cabf0e6ae558662bb6090f32ac6c030e0c5cfaca64d5cd7e5"} Feb 01 07:34:12 crc kubenswrapper[4650]: I0201 07:34:12.240816 4650 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" event={"ID":"52d07d17-a860-4303-9048-d4da8a875ad3","Type":"ContainerStarted","Data":"15954352357dac9c5671da479f63020fc7263da4ba7ad7372097ecf8b71ac2f1"} Feb 01 07:34:12 crc kubenswrapper[4650]: I0201 07:34:12.240826 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" event={"ID":"52d07d17-a860-4303-9048-d4da8a875ad3","Type":"ContainerStarted","Data":"218046c8422ed9a319c009dbe1b6d1758df6707a767eb87e99b193da782da3e4"} Feb 01 07:34:12 crc kubenswrapper[4650]: I0201 07:34:12.240836 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" event={"ID":"52d07d17-a860-4303-9048-d4da8a875ad3","Type":"ContainerStarted","Data":"e4c8346e5998905d83f68ef7cd89e84e0d0de56e7cbdc258ea1a1a7058d6cebe"} Feb 01 07:34:15 crc kubenswrapper[4650]: I0201 07:34:15.274732 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" event={"ID":"52d07d17-a860-4303-9048-d4da8a875ad3","Type":"ContainerStarted","Data":"7356e1ad41f60b2ae346c1da8eee9417c4da129a98871773cb745d8177fec3d6"} Feb 01 07:34:17 crc kubenswrapper[4650]: I0201 07:34:17.290835 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" event={"ID":"52d07d17-a860-4303-9048-d4da8a875ad3","Type":"ContainerStarted","Data":"28e584ccb7c8d1ff4b7e07900bc7c6c23cfc30d5e0dc3eec62a87af5146cf663"} Feb 01 07:34:17 crc kubenswrapper[4650]: I0201 07:34:17.291380 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:17 crc kubenswrapper[4650]: I0201 07:34:17.365108 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" podStartSLOduration=7.365084381 podStartE2EDuration="7.365084381s" podCreationTimestamp="2026-02-01 07:34:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:34:17.362093143 +0000 UTC m=+656.085191408" watchObservedRunningTime="2026-02-01 07:34:17.365084381 +0000 UTC m=+656.088182676" Feb 01 07:34:17 crc kubenswrapper[4650]: I0201 07:34:17.368136 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:18 crc kubenswrapper[4650]: I0201 07:34:18.298075 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:18 crc kubenswrapper[4650]: I0201 07:34:18.298128 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:18 crc kubenswrapper[4650]: I0201 07:34:18.340953 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:22 crc kubenswrapper[4650]: I0201 07:34:22.418165 4650 scope.go:117] "RemoveContainer" containerID="c3bba94e2fc70e50b46639439a12b34db68a19e8dd937f5f2cdad28f0a7ac012" Feb 01 07:34:23 crc kubenswrapper[4650]: I0201 07:34:23.340195 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-k6xtw_e408ebb2-07fc-4317-92d4-1316ece830fb/kube-multus/2.log" Feb 01 07:34:24 crc kubenswrapper[4650]: I0201 07:34:24.965390 4650 scope.go:117] "RemoveContainer" 
containerID="18d71ea0d1e0ca8b54e4bd06f8df0d55bbe23fe80bbaf025dcf0468ad1399f99" Feb 01 07:34:24 crc kubenswrapper[4650]: E0201 07:34:24.965734 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-k6xtw_openshift-multus(e408ebb2-07fc-4317-92d4-1316ece830fb)\"" pod="openshift-multus/multus-k6xtw" podUID="e408ebb2-07fc-4317-92d4-1316ece830fb" Feb 01 07:34:39 crc kubenswrapper[4650]: I0201 07:34:39.965803 4650 scope.go:117] "RemoveContainer" containerID="18d71ea0d1e0ca8b54e4bd06f8df0d55bbe23fe80bbaf025dcf0468ad1399f99" Feb 01 07:34:40 crc kubenswrapper[4650]: I0201 07:34:40.460208 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-k6xtw_e408ebb2-07fc-4317-92d4-1316ece830fb/kube-multus/2.log" Feb 01 07:34:40 crc kubenswrapper[4650]: I0201 07:34:40.461258 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-k6xtw" event={"ID":"e408ebb2-07fc-4317-92d4-1316ece830fb","Type":"ContainerStarted","Data":"a04c221c2cdeeb3f65bb04c30541d08a8aff5f6dd370ebf7ff45aed4de8971da"} Feb 01 07:34:40 crc kubenswrapper[4650]: I0201 07:34:40.586153 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-xq7xx" Feb 01 07:34:47 crc kubenswrapper[4650]: I0201 07:34:47.600908 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr"] Feb 01 07:34:47 crc kubenswrapper[4650]: I0201 07:34:47.602226 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" Feb 01 07:34:47 crc kubenswrapper[4650]: I0201 07:34:47.604800 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Feb 01 07:34:47 crc kubenswrapper[4650]: I0201 07:34:47.621455 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr"] Feb 01 07:34:47 crc kubenswrapper[4650]: I0201 07:34:47.801159 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vr5zm\" (UniqueName: \"kubernetes.io/projected/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-kube-api-access-vr5zm\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr\" (UID: \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" Feb 01 07:34:47 crc kubenswrapper[4650]: I0201 07:34:47.801282 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr\" (UID: \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" Feb 01 07:34:47 crc kubenswrapper[4650]: I0201 07:34:47.801386 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr\" (UID: 
\"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" Feb 01 07:34:47 crc kubenswrapper[4650]: I0201 07:34:47.903488 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr\" (UID: \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" Feb 01 07:34:47 crc kubenswrapper[4650]: I0201 07:34:47.903629 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vr5zm\" (UniqueName: \"kubernetes.io/projected/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-kube-api-access-vr5zm\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr\" (UID: \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" Feb 01 07:34:47 crc kubenswrapper[4650]: I0201 07:34:47.903673 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr\" (UID: \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" Feb 01 07:34:47 crc kubenswrapper[4650]: I0201 07:34:47.904652 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr\" (UID: \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" Feb 01 07:34:47 crc kubenswrapper[4650]: I0201 07:34:47.904671 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr\" (UID: \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" Feb 01 07:34:47 crc kubenswrapper[4650]: I0201 07:34:47.940425 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vr5zm\" (UniqueName: \"kubernetes.io/projected/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-kube-api-access-vr5zm\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr\" (UID: \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" Feb 01 07:34:48 crc kubenswrapper[4650]: I0201 07:34:48.227724 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" Feb 01 07:34:48 crc kubenswrapper[4650]: I0201 07:34:48.464238 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr"] Feb 01 07:34:48 crc kubenswrapper[4650]: I0201 07:34:48.515608 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" event={"ID":"f647c2e5-f410-4aa3-b10b-1ef14f7702ba","Type":"ContainerStarted","Data":"9fab0b1f5a1d706e5cbf82981f152999e8f10524626ec38ac8e7a80328b76a75"} Feb 01 07:34:49 crc kubenswrapper[4650]: I0201 07:34:49.527388 4650 generic.go:334] "Generic (PLEG): container finished" podID="f647c2e5-f410-4aa3-b10b-1ef14f7702ba" containerID="1f552776a9a719dfc3215d2f6731934970cf3e7d76e3a4bee2c5a9ade14f271a" exitCode=0 Feb 01 07:34:49 crc kubenswrapper[4650]: I0201 07:34:49.527598 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" event={"ID":"f647c2e5-f410-4aa3-b10b-1ef14f7702ba","Type":"ContainerDied","Data":"1f552776a9a719dfc3215d2f6731934970cf3e7d76e3a4bee2c5a9ade14f271a"} Feb 01 07:34:51 crc kubenswrapper[4650]: I0201 07:34:51.542621 4650 generic.go:334] "Generic (PLEG): container finished" podID="f647c2e5-f410-4aa3-b10b-1ef14f7702ba" containerID="75b368f99a347d6998e2dda3d6691421032090a00f3971e9dec4108f1cc7b6e8" exitCode=0 Feb 01 07:34:51 crc kubenswrapper[4650]: I0201 07:34:51.543088 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" event={"ID":"f647c2e5-f410-4aa3-b10b-1ef14f7702ba","Type":"ContainerDied","Data":"75b368f99a347d6998e2dda3d6691421032090a00f3971e9dec4108f1cc7b6e8"} Feb 01 07:34:52 crc kubenswrapper[4650]: I0201 07:34:52.551699 4650 generic.go:334] "Generic (PLEG): container finished" podID="f647c2e5-f410-4aa3-b10b-1ef14f7702ba" containerID="76c680155de0d04512ca44cc86f083a37d819e3a0fe8218a52cfd9242ca4c041" exitCode=0 Feb 01 07:34:52 crc kubenswrapper[4650]: I0201 07:34:52.551792 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" event={"ID":"f647c2e5-f410-4aa3-b10b-1ef14f7702ba","Type":"ContainerDied","Data":"76c680155de0d04512ca44cc86f083a37d819e3a0fe8218a52cfd9242ca4c041"} Feb 01 07:34:53 crc kubenswrapper[4650]: I0201 07:34:53.865523 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" Feb 01 07:34:53 crc kubenswrapper[4650]: I0201 07:34:53.986103 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vr5zm\" (UniqueName: \"kubernetes.io/projected/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-kube-api-access-vr5zm\") pod \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\" (UID: \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\") " Feb 01 07:34:53 crc kubenswrapper[4650]: I0201 07:34:53.986192 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-bundle\") pod \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\" (UID: \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\") " Feb 01 07:34:53 crc kubenswrapper[4650]: I0201 07:34:53.986254 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-util\") pod \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\" (UID: \"f647c2e5-f410-4aa3-b10b-1ef14f7702ba\") " Feb 01 07:34:53 crc kubenswrapper[4650]: I0201 07:34:53.987460 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-bundle" (OuterVolumeSpecName: "bundle") pod "f647c2e5-f410-4aa3-b10b-1ef14f7702ba" (UID: "f647c2e5-f410-4aa3-b10b-1ef14f7702ba"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:34:53 crc kubenswrapper[4650]: I0201 07:34:53.997518 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-kube-api-access-vr5zm" (OuterVolumeSpecName: "kube-api-access-vr5zm") pod "f647c2e5-f410-4aa3-b10b-1ef14f7702ba" (UID: "f647c2e5-f410-4aa3-b10b-1ef14f7702ba"). InnerVolumeSpecName "kube-api-access-vr5zm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:34:53 crc kubenswrapper[4650]: I0201 07:34:53.999070 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-util" (OuterVolumeSpecName: "util") pod "f647c2e5-f410-4aa3-b10b-1ef14f7702ba" (UID: "f647c2e5-f410-4aa3-b10b-1ef14f7702ba"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:34:54 crc kubenswrapper[4650]: I0201 07:34:54.088720 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vr5zm\" (UniqueName: \"kubernetes.io/projected/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-kube-api-access-vr5zm\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:54 crc kubenswrapper[4650]: I0201 07:34:54.088946 4650 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:54 crc kubenswrapper[4650]: I0201 07:34:54.089001 4650 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f647c2e5-f410-4aa3-b10b-1ef14f7702ba-util\") on node \"crc\" DevicePath \"\"" Feb 01 07:34:54 crc kubenswrapper[4650]: I0201 07:34:54.568057 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" event={"ID":"f647c2e5-f410-4aa3-b10b-1ef14f7702ba","Type":"ContainerDied","Data":"9fab0b1f5a1d706e5cbf82981f152999e8f10524626ec38ac8e7a80328b76a75"} Feb 01 07:34:54 crc kubenswrapper[4650]: I0201 07:34:54.568103 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9fab0b1f5a1d706e5cbf82981f152999e8f10524626ec38ac8e7a80328b76a75" Feb 01 07:34:54 crc kubenswrapper[4650]: I0201 07:34:54.568325 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr" Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.680179 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-gnrf8"] Feb 01 07:34:56 crc kubenswrapper[4650]: E0201 07:34:56.680545 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f647c2e5-f410-4aa3-b10b-1ef14f7702ba" containerName="pull" Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.680566 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f647c2e5-f410-4aa3-b10b-1ef14f7702ba" containerName="pull" Feb 01 07:34:56 crc kubenswrapper[4650]: E0201 07:34:56.680587 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f647c2e5-f410-4aa3-b10b-1ef14f7702ba" containerName="util" Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.680596 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f647c2e5-f410-4aa3-b10b-1ef14f7702ba" containerName="util" Feb 01 07:34:56 crc kubenswrapper[4650]: E0201 07:34:56.680613 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f647c2e5-f410-4aa3-b10b-1ef14f7702ba" containerName="extract" Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.680621 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f647c2e5-f410-4aa3-b10b-1ef14f7702ba" containerName="extract" Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.680760 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="f647c2e5-f410-4aa3-b10b-1ef14f7702ba" containerName="extract" Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.681372 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-gnrf8" Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.688680 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.689126 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-qtppf" Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.689272 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.699412 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-gnrf8"] Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.726294 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nz75f\" (UniqueName: \"kubernetes.io/projected/497239ce-dda7-47b2-8a9f-d8b14b4f05a9-kube-api-access-nz75f\") pod \"nmstate-operator-646758c888-gnrf8\" (UID: \"497239ce-dda7-47b2-8a9f-d8b14b4f05a9\") " pod="openshift-nmstate/nmstate-operator-646758c888-gnrf8" Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.827432 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nz75f\" (UniqueName: \"kubernetes.io/projected/497239ce-dda7-47b2-8a9f-d8b14b4f05a9-kube-api-access-nz75f\") pod \"nmstate-operator-646758c888-gnrf8\" (UID: \"497239ce-dda7-47b2-8a9f-d8b14b4f05a9\") " pod="openshift-nmstate/nmstate-operator-646758c888-gnrf8" Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.851341 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nz75f\" (UniqueName: \"kubernetes.io/projected/497239ce-dda7-47b2-8a9f-d8b14b4f05a9-kube-api-access-nz75f\") pod \"nmstate-operator-646758c888-gnrf8\" (UID: \"497239ce-dda7-47b2-8a9f-d8b14b4f05a9\") " pod="openshift-nmstate/nmstate-operator-646758c888-gnrf8" Feb 01 07:34:56 crc kubenswrapper[4650]: I0201 07:34:56.996146 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-gnrf8" Feb 01 07:34:57 crc kubenswrapper[4650]: I0201 07:34:57.477148 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-gnrf8"] Feb 01 07:34:57 crc kubenswrapper[4650]: W0201 07:34:57.484864 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod497239ce_dda7_47b2_8a9f_d8b14b4f05a9.slice/crio-71afc4d94f3b764f1a69e87fefa0c47aca5b7dccdf3a11063adaa7e03c0c3b34 WatchSource:0}: Error finding container 71afc4d94f3b764f1a69e87fefa0c47aca5b7dccdf3a11063adaa7e03c0c3b34: Status 404 returned error can't find the container with id 71afc4d94f3b764f1a69e87fefa0c47aca5b7dccdf3a11063adaa7e03c0c3b34 Feb 01 07:34:57 crc kubenswrapper[4650]: I0201 07:34:57.584868 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-gnrf8" event={"ID":"497239ce-dda7-47b2-8a9f-d8b14b4f05a9","Type":"ContainerStarted","Data":"71afc4d94f3b764f1a69e87fefa0c47aca5b7dccdf3a11063adaa7e03c0c3b34"} Feb 01 07:35:00 crc kubenswrapper[4650]: I0201 07:35:00.601321 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-gnrf8" event={"ID":"497239ce-dda7-47b2-8a9f-d8b14b4f05a9","Type":"ContainerStarted","Data":"eb95b06ff39599dcd9c567d5e32e39b875cfdea38cd74cf477c228fb3ca29a7e"} Feb 01 07:35:00 crc kubenswrapper[4650]: I0201 07:35:00.625935 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-gnrf8" podStartSLOduration=2.527710902 podStartE2EDuration="4.625915612s" podCreationTimestamp="2026-02-01 07:34:56 +0000 UTC" firstStartedPulling="2026-02-01 07:34:57.487638202 +0000 UTC m=+696.210736447" lastFinishedPulling="2026-02-01 07:34:59.585842912 +0000 UTC m=+698.308941157" observedRunningTime="2026-02-01 07:35:00.6166207 +0000 UTC m=+699.339718965" watchObservedRunningTime="2026-02-01 07:35:00.625915612 +0000 UTC m=+699.349013867" Feb 01 07:35:07 crc kubenswrapper[4650]: I0201 07:35:07.161344 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:35:07 crc kubenswrapper[4650]: I0201 07:35:07.161973 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:35:07 crc kubenswrapper[4650]: I0201 07:35:07.912617 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-dl4px"] Feb 01 07:35:07 crc kubenswrapper[4650]: I0201 07:35:07.914461 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-dl4px" Feb 01 07:35:07 crc kubenswrapper[4650]: I0201 07:35:07.917449 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-64kg8" Feb 01 07:35:07 crc kubenswrapper[4650]: I0201 07:35:07.937465 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-dl4px"] Feb 01 07:35:07 crc kubenswrapper[4650]: I0201 07:35:07.941689 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq"] Feb 01 07:35:07 crc kubenswrapper[4650]: I0201 07:35:07.942611 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" Feb 01 07:35:07 crc kubenswrapper[4650]: I0201 07:35:07.944824 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Feb 01 07:35:07 crc kubenswrapper[4650]: I0201 07:35:07.984685 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-6grf4"] Feb 01 07:35:07 crc kubenswrapper[4650]: I0201 07:35:07.985880 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.003191 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq"] Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.012213 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d-dbus-socket\") pod \"nmstate-handler-6grf4\" (UID: \"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d\") " pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.012307 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d-ovs-socket\") pod \"nmstate-handler-6grf4\" (UID: \"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d\") " pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.012344 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d-nmstate-lock\") pod \"nmstate-handler-6grf4\" (UID: \"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d\") " pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.012400 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lvtx\" (UniqueName: \"kubernetes.io/projected/e49e2e7c-42b1-4bf3-821e-c369f1651bae-kube-api-access-9lvtx\") pod \"nmstate-webhook-8474b5b9d8-t6mzq\" (UID: \"e49e2e7c-42b1-4bf3-821e-c369f1651bae\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.012434 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkgzt\" (UniqueName: \"kubernetes.io/projected/1d8abd69-727b-45f0-ab34-9f94c14dc6b7-kube-api-access-lkgzt\") pod \"nmstate-metrics-54757c584b-dl4px\" (UID: \"1d8abd69-727b-45f0-ab34-9f94c14dc6b7\") " 
pod="openshift-nmstate/nmstate-metrics-54757c584b-dl4px" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.012494 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e49e2e7c-42b1-4bf3-821e-c369f1651bae-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-t6mzq\" (UID: \"e49e2e7c-42b1-4bf3-821e-c369f1651bae\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.012523 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6499f\" (UniqueName: \"kubernetes.io/projected/3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d-kube-api-access-6499f\") pod \"nmstate-handler-6grf4\" (UID: \"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d\") " pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.105961 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg"] Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.106730 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.109508 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.110153 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.110301 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-stjks" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.114817 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e49e2e7c-42b1-4bf3-821e-c369f1651bae-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-t6mzq\" (UID: \"e49e2e7c-42b1-4bf3-821e-c369f1651bae\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.114897 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6499f\" (UniqueName: \"kubernetes.io/projected/3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d-kube-api-access-6499f\") pod \"nmstate-handler-6grf4\" (UID: \"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d\") " pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.114945 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqr7c\" (UniqueName: \"kubernetes.io/projected/94a95cbd-86c8-4a06-8690-83b9b2451f5a-kube-api-access-pqr7c\") pod \"nmstate-console-plugin-7754f76f8b-ks8zg\" (UID: \"94a95cbd-86c8-4a06-8690-83b9b2451f5a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.114969 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/94a95cbd-86c8-4a06-8690-83b9b2451f5a-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-ks8zg\" (UID: \"94a95cbd-86c8-4a06-8690-83b9b2451f5a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.115005 4650 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d-dbus-socket\") pod \"nmstate-handler-6grf4\" (UID: \"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d\") " pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: E0201 07:35:08.115107 4650 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.115254 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d-ovs-socket\") pod \"nmstate-handler-6grf4\" (UID: \"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d\") " pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.115311 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d-nmstate-lock\") pod \"nmstate-handler-6grf4\" (UID: \"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d\") " pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.115309 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d-ovs-socket\") pod \"nmstate-handler-6grf4\" (UID: \"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d\") " pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: E0201 07:35:08.115389 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e49e2e7c-42b1-4bf3-821e-c369f1651bae-tls-key-pair podName:e49e2e7c-42b1-4bf3-821e-c369f1651bae nodeName:}" failed. No retries permitted until 2026-02-01 07:35:08.615367312 +0000 UTC m=+707.338465767 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/e49e2e7c-42b1-4bf3-821e-c369f1651bae-tls-key-pair") pod "nmstate-webhook-8474b5b9d8-t6mzq" (UID: "e49e2e7c-42b1-4bf3-821e-c369f1651bae") : secret "openshift-nmstate-webhook" not found Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.115394 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d-nmstate-lock\") pod \"nmstate-handler-6grf4\" (UID: \"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d\") " pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.115420 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lvtx\" (UniqueName: \"kubernetes.io/projected/e49e2e7c-42b1-4bf3-821e-c369f1651bae-kube-api-access-9lvtx\") pod \"nmstate-webhook-8474b5b9d8-t6mzq\" (UID: \"e49e2e7c-42b1-4bf3-821e-c369f1651bae\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.115476 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d-dbus-socket\") pod \"nmstate-handler-6grf4\" (UID: \"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d\") " pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.115480 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkgzt\" (UniqueName: \"kubernetes.io/projected/1d8abd69-727b-45f0-ab34-9f94c14dc6b7-kube-api-access-lkgzt\") pod \"nmstate-metrics-54757c584b-dl4px\" (UID: \"1d8abd69-727b-45f0-ab34-9f94c14dc6b7\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-dl4px" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.115515 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/94a95cbd-86c8-4a06-8690-83b9b2451f5a-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-ks8zg\" (UID: \"94a95cbd-86c8-4a06-8690-83b9b2451f5a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.130613 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg"] Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.141575 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lvtx\" (UniqueName: \"kubernetes.io/projected/e49e2e7c-42b1-4bf3-821e-c369f1651bae-kube-api-access-9lvtx\") pod \"nmstate-webhook-8474b5b9d8-t6mzq\" (UID: \"e49e2e7c-42b1-4bf3-821e-c369f1651bae\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.142863 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6499f\" (UniqueName: \"kubernetes.io/projected/3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d-kube-api-access-6499f\") pod \"nmstate-handler-6grf4\" (UID: \"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d\") " pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.152123 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkgzt\" (UniqueName: \"kubernetes.io/projected/1d8abd69-727b-45f0-ab34-9f94c14dc6b7-kube-api-access-lkgzt\") pod 
\"nmstate-metrics-54757c584b-dl4px\" (UID: \"1d8abd69-727b-45f0-ab34-9f94c14dc6b7\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-dl4px" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.216317 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/94a95cbd-86c8-4a06-8690-83b9b2451f5a-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-ks8zg\" (UID: \"94a95cbd-86c8-4a06-8690-83b9b2451f5a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.216411 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqr7c\" (UniqueName: \"kubernetes.io/projected/94a95cbd-86c8-4a06-8690-83b9b2451f5a-kube-api-access-pqr7c\") pod \"nmstate-console-plugin-7754f76f8b-ks8zg\" (UID: \"94a95cbd-86c8-4a06-8690-83b9b2451f5a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.216431 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/94a95cbd-86c8-4a06-8690-83b9b2451f5a-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-ks8zg\" (UID: \"94a95cbd-86c8-4a06-8690-83b9b2451f5a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" Feb 01 07:35:08 crc kubenswrapper[4650]: E0201 07:35:08.216550 4650 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Feb 01 07:35:08 crc kubenswrapper[4650]: E0201 07:35:08.216605 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/94a95cbd-86c8-4a06-8690-83b9b2451f5a-plugin-serving-cert podName:94a95cbd-86c8-4a06-8690-83b9b2451f5a nodeName:}" failed. No retries permitted until 2026-02-01 07:35:08.716590413 +0000 UTC m=+707.439688658 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/94a95cbd-86c8-4a06-8690-83b9b2451f5a-plugin-serving-cert") pod "nmstate-console-plugin-7754f76f8b-ks8zg" (UID: "94a95cbd-86c8-4a06-8690-83b9b2451f5a") : secret "plugin-serving-cert" not found Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.217210 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/94a95cbd-86c8-4a06-8690-83b9b2451f5a-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-ks8zg\" (UID: \"94a95cbd-86c8-4a06-8690-83b9b2451f5a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.238060 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqr7c\" (UniqueName: \"kubernetes.io/projected/94a95cbd-86c8-4a06-8690-83b9b2451f5a-kube-api-access-pqr7c\") pod \"nmstate-console-plugin-7754f76f8b-ks8zg\" (UID: \"94a95cbd-86c8-4a06-8690-83b9b2451f5a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.243696 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-dl4px" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.303396 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.347826 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-779c478558-5k7lj"] Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.348918 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.380989 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-779c478558-5k7lj"] Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.418183 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/efa255ec-e639-4461-ab1b-b8d59c9b5f86-console-serving-cert\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.418254 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/efa255ec-e639-4461-ab1b-b8d59c9b5f86-console-config\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.418278 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/efa255ec-e639-4461-ab1b-b8d59c9b5f86-console-oauth-config\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.418307 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q65ll\" (UniqueName: \"kubernetes.io/projected/efa255ec-e639-4461-ab1b-b8d59c9b5f86-kube-api-access-q65ll\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.418326 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/efa255ec-e639-4461-ab1b-b8d59c9b5f86-oauth-serving-cert\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.418360 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efa255ec-e639-4461-ab1b-b8d59c9b5f86-trusted-ca-bundle\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.418398 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/efa255ec-e639-4461-ab1b-b8d59c9b5f86-service-ca\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.524092 4650 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/efa255ec-e639-4461-ab1b-b8d59c9b5f86-console-serving-cert\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.524167 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/efa255ec-e639-4461-ab1b-b8d59c9b5f86-console-config\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.524196 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/efa255ec-e639-4461-ab1b-b8d59c9b5f86-console-oauth-config\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.524232 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q65ll\" (UniqueName: \"kubernetes.io/projected/efa255ec-e639-4461-ab1b-b8d59c9b5f86-kube-api-access-q65ll\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.524254 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/efa255ec-e639-4461-ab1b-b8d59c9b5f86-oauth-serving-cert\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.524294 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efa255ec-e639-4461-ab1b-b8d59c9b5f86-trusted-ca-bundle\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.524336 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/efa255ec-e639-4461-ab1b-b8d59c9b5f86-service-ca\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.527898 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/efa255ec-e639-4461-ab1b-b8d59c9b5f86-console-config\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.528027 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efa255ec-e639-4461-ab1b-b8d59c9b5f86-trusted-ca-bundle\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.528081 4650 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/efa255ec-e639-4461-ab1b-b8d59c9b5f86-oauth-serving-cert\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.529695 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/efa255ec-e639-4461-ab1b-b8d59c9b5f86-console-serving-cert\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.530696 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/efa255ec-e639-4461-ab1b-b8d59c9b5f86-service-ca\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.535512 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/efa255ec-e639-4461-ab1b-b8d59c9b5f86-console-oauth-config\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.541596 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q65ll\" (UniqueName: \"kubernetes.io/projected/efa255ec-e639-4461-ab1b-b8d59c9b5f86-kube-api-access-q65ll\") pod \"console-779c478558-5k7lj\" (UID: \"efa255ec-e639-4461-ab1b-b8d59c9b5f86\") " pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.625650 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e49e2e7c-42b1-4bf3-821e-c369f1651bae-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-t6mzq\" (UID: \"e49e2e7c-42b1-4bf3-821e-c369f1651bae\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.628716 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e49e2e7c-42b1-4bf3-821e-c369f1651bae-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-t6mzq\" (UID: \"e49e2e7c-42b1-4bf3-821e-c369f1651bae\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.659693 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-6grf4" event={"ID":"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d","Type":"ContainerStarted","Data":"8dcc730018f9a97e331359e83457c0ada7c4b77b730c6ebc7242d536c0de2ecd"} Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.684243 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.727453 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-dl4px"] Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.727967 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/94a95cbd-86c8-4a06-8690-83b9b2451f5a-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-ks8zg\" (UID: \"94a95cbd-86c8-4a06-8690-83b9b2451f5a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.731206 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/94a95cbd-86c8-4a06-8690-83b9b2451f5a-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-ks8zg\" (UID: \"94a95cbd-86c8-4a06-8690-83b9b2451f5a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" Feb 01 07:35:08 crc kubenswrapper[4650]: W0201 07:35:08.736684 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d8abd69_727b_45f0_ab34_9f94c14dc6b7.slice/crio-0c8b4c942df1be4c0958b6177bb2883f428c2faa045fae4e7686fd77ec0cf6b7 WatchSource:0}: Error finding container 0c8b4c942df1be4c0958b6177bb2883f428c2faa045fae4e7686fd77ec0cf6b7: Status 404 returned error can't find the container with id 0c8b4c942df1be4c0958b6177bb2883f428c2faa045fae4e7686fd77ec0cf6b7 Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.857872 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" Feb 01 07:35:08 crc kubenswrapper[4650]: I0201 07:35:08.890535 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-779c478558-5k7lj"] Feb 01 07:35:08 crc kubenswrapper[4650]: W0201 07:35:08.895520 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podefa255ec_e639_4461_ab1b_b8d59c9b5f86.slice/crio-ca05e4ae13740ff1a7861081d7ed8c4db1b1666ad6f30191c3979fc6b4af2a5a WatchSource:0}: Error finding container ca05e4ae13740ff1a7861081d7ed8c4db1b1666ad6f30191c3979fc6b4af2a5a: Status 404 returned error can't find the container with id ca05e4ae13740ff1a7861081d7ed8c4db1b1666ad6f30191c3979fc6b4af2a5a Feb 01 07:35:09 crc kubenswrapper[4650]: I0201 07:35:09.027565 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" Feb 01 07:35:09 crc kubenswrapper[4650]: I0201 07:35:09.083531 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq"] Feb 01 07:35:09 crc kubenswrapper[4650]: W0201 07:35:09.113285 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode49e2e7c_42b1_4bf3_821e_c369f1651bae.slice/crio-0a0dfef2403463c351187d2335dff41faa0eb4a96315e093b989442975795199 WatchSource:0}: Error finding container 0a0dfef2403463c351187d2335dff41faa0eb4a96315e093b989442975795199: Status 404 returned error can't find the container with id 0a0dfef2403463c351187d2335dff41faa0eb4a96315e093b989442975795199 Feb 01 07:35:09 crc kubenswrapper[4650]: I0201 07:35:09.253898 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg"] Feb 01 07:35:09 crc kubenswrapper[4650]: I0201 07:35:09.669178 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-779c478558-5k7lj" event={"ID":"efa255ec-e639-4461-ab1b-b8d59c9b5f86","Type":"ContainerStarted","Data":"12c90b8a4b45083471cf6ed9b99ad66383214636197c59b85b4ab0918f7e7260"} Feb 01 07:35:09 crc kubenswrapper[4650]: I0201 07:35:09.669233 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-779c478558-5k7lj" event={"ID":"efa255ec-e639-4461-ab1b-b8d59c9b5f86","Type":"ContainerStarted","Data":"ca05e4ae13740ff1a7861081d7ed8c4db1b1666ad6f30191c3979fc6b4af2a5a"} Feb 01 07:35:09 crc kubenswrapper[4650]: I0201 07:35:09.673843 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-dl4px" event={"ID":"1d8abd69-727b-45f0-ab34-9f94c14dc6b7","Type":"ContainerStarted","Data":"0c8b4c942df1be4c0958b6177bb2883f428c2faa045fae4e7686fd77ec0cf6b7"} Feb 01 07:35:09 crc kubenswrapper[4650]: I0201 07:35:09.676150 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" event={"ID":"94a95cbd-86c8-4a06-8690-83b9b2451f5a","Type":"ContainerStarted","Data":"30d7a04bd95bb584d0bb43d0c8410db9b20f2200d6847c56d9fe7c490afd9879"} Feb 01 07:35:09 crc kubenswrapper[4650]: I0201 07:35:09.677533 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" event={"ID":"e49e2e7c-42b1-4bf3-821e-c369f1651bae","Type":"ContainerStarted","Data":"0a0dfef2403463c351187d2335dff41faa0eb4a96315e093b989442975795199"} Feb 01 07:35:09 crc kubenswrapper[4650]: I0201 07:35:09.692083 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-779c478558-5k7lj" podStartSLOduration=1.692064519 podStartE2EDuration="1.692064519s" podCreationTimestamp="2026-02-01 07:35:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:35:09.690369545 +0000 UTC m=+708.413467820" watchObservedRunningTime="2026-02-01 07:35:09.692064519 +0000 UTC m=+708.415162784" Feb 01 07:35:12 crc kubenswrapper[4650]: I0201 07:35:12.699762 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" event={"ID":"e49e2e7c-42b1-4bf3-821e-c369f1651bae","Type":"ContainerStarted","Data":"2a38ac93675b74089fd92e011178b14fe9b117668ad3d7b554c70d4f887f0a8a"} Feb 01 07:35:12 crc kubenswrapper[4650]: 
I0201 07:35:12.700526 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" Feb 01 07:35:12 crc kubenswrapper[4650]: I0201 07:35:12.703438 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-6grf4" event={"ID":"3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d","Type":"ContainerStarted","Data":"78ca03b0201539c9fbd24d27ddc2519e318964b9b2d6b76a0d05bf650c432fdf"} Feb 01 07:35:12 crc kubenswrapper[4650]: I0201 07:35:12.703653 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:12 crc kubenswrapper[4650]: I0201 07:35:12.706840 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-dl4px" event={"ID":"1d8abd69-727b-45f0-ab34-9f94c14dc6b7","Type":"ContainerStarted","Data":"3918a206dbb48173810316dd262d5f21e37d422cd9f9a15f40002520303f0093"} Feb 01 07:35:12 crc kubenswrapper[4650]: I0201 07:35:12.730099 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" podStartSLOduration=3.043444161 podStartE2EDuration="5.729103027s" podCreationTimestamp="2026-02-01 07:35:07 +0000 UTC" firstStartedPulling="2026-02-01 07:35:09.133619846 +0000 UTC m=+707.856718091" lastFinishedPulling="2026-02-01 07:35:11.819278712 +0000 UTC m=+710.542376957" observedRunningTime="2026-02-01 07:35:12.722524736 +0000 UTC m=+711.445622991" watchObservedRunningTime="2026-02-01 07:35:12.729103027 +0000 UTC m=+711.452201272" Feb 01 07:35:12 crc kubenswrapper[4650]: I0201 07:35:12.750103 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-6grf4" podStartSLOduration=2.536787634 podStartE2EDuration="5.750083613s" podCreationTimestamp="2026-02-01 07:35:07 +0000 UTC" firstStartedPulling="2026-02-01 07:35:08.33074311 +0000 UTC m=+707.053841355" lastFinishedPulling="2026-02-01 07:35:11.544039089 +0000 UTC m=+710.267137334" observedRunningTime="2026-02-01 07:35:12.746067078 +0000 UTC m=+711.469165343" watchObservedRunningTime="2026-02-01 07:35:12.750083613 +0000 UTC m=+711.473181878" Feb 01 07:35:13 crc kubenswrapper[4650]: I0201 07:35:13.723878 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" event={"ID":"94a95cbd-86c8-4a06-8690-83b9b2451f5a","Type":"ContainerStarted","Data":"fec7380ff8e95da61b351fe18b92bf139bccbcb1749aafb12ab8df7bdf06ba04"} Feb 01 07:35:13 crc kubenswrapper[4650]: I0201 07:35:13.740006 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-ks8zg" podStartSLOduration=2.128167992 podStartE2EDuration="5.739991069s" podCreationTimestamp="2026-02-01 07:35:08 +0000 UTC" firstStartedPulling="2026-02-01 07:35:09.261003446 +0000 UTC m=+707.984101691" lastFinishedPulling="2026-02-01 07:35:12.872826523 +0000 UTC m=+711.595924768" observedRunningTime="2026-02-01 07:35:13.73925026 +0000 UTC m=+712.462348505" watchObservedRunningTime="2026-02-01 07:35:13.739991069 +0000 UTC m=+712.463089314" Feb 01 07:35:14 crc kubenswrapper[4650]: I0201 07:35:14.733978 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-dl4px" event={"ID":"1d8abd69-727b-45f0-ab34-9f94c14dc6b7","Type":"ContainerStarted","Data":"dc2d0a4bcb0dd9ae3187d341232eefdbe778d28320a1d3522c70abd43aa92fd4"} Feb 01 07:35:14 crc 
kubenswrapper[4650]: I0201 07:35:14.760815 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-dl4px" podStartSLOduration=2.449226417 podStartE2EDuration="7.760792598s" podCreationTimestamp="2026-02-01 07:35:07 +0000 UTC" firstStartedPulling="2026-02-01 07:35:08.738576889 +0000 UTC m=+707.461675144" lastFinishedPulling="2026-02-01 07:35:14.05014308 +0000 UTC m=+712.773241325" observedRunningTime="2026-02-01 07:35:14.755405368 +0000 UTC m=+713.478503663" watchObservedRunningTime="2026-02-01 07:35:14.760792598 +0000 UTC m=+713.483890873" Feb 01 07:35:18 crc kubenswrapper[4650]: I0201 07:35:18.338349 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-6grf4" Feb 01 07:35:18 crc kubenswrapper[4650]: I0201 07:35:18.685291 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:18 crc kubenswrapper[4650]: I0201 07:35:18.685351 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:18 crc kubenswrapper[4650]: I0201 07:35:18.693245 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:18 crc kubenswrapper[4650]: I0201 07:35:18.769721 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-779c478558-5k7lj" Feb 01 07:35:18 crc kubenswrapper[4650]: I0201 07:35:18.862375 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-snf8v"] Feb 01 07:35:28 crc kubenswrapper[4650]: I0201 07:35:28.864247 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-t6mzq" Feb 01 07:35:37 crc kubenswrapper[4650]: I0201 07:35:37.161487 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:35:37 crc kubenswrapper[4650]: I0201 07:35:37.162267 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:35:42 crc kubenswrapper[4650]: I0201 07:35:42.814391 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd"] Feb 01 07:35:42 crc kubenswrapper[4650]: I0201 07:35:42.816631 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" Feb 01 07:35:42 crc kubenswrapper[4650]: I0201 07:35:42.818673 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Feb 01 07:35:42 crc kubenswrapper[4650]: I0201 07:35:42.839607 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd"] Feb 01 07:35:42 crc kubenswrapper[4650]: I0201 07:35:42.892525 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhcxh\" (UniqueName: \"kubernetes.io/projected/b1bc0d07-354f-441c-bc7b-b1160c88303b-kube-api-access-mhcxh\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd\" (UID: \"b1bc0d07-354f-441c-bc7b-b1160c88303b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" Feb 01 07:35:42 crc kubenswrapper[4650]: I0201 07:35:42.892598 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b1bc0d07-354f-441c-bc7b-b1160c88303b-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd\" (UID: \"b1bc0d07-354f-441c-bc7b-b1160c88303b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" Feb 01 07:35:42 crc kubenswrapper[4650]: I0201 07:35:42.892688 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b1bc0d07-354f-441c-bc7b-b1160c88303b-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd\" (UID: \"b1bc0d07-354f-441c-bc7b-b1160c88303b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" Feb 01 07:35:42 crc kubenswrapper[4650]: I0201 07:35:42.993822 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b1bc0d07-354f-441c-bc7b-b1160c88303b-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd\" (UID: \"b1bc0d07-354f-441c-bc7b-b1160c88303b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" Feb 01 07:35:42 crc kubenswrapper[4650]: I0201 07:35:42.994165 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhcxh\" (UniqueName: \"kubernetes.io/projected/b1bc0d07-354f-441c-bc7b-b1160c88303b-kube-api-access-mhcxh\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd\" (UID: \"b1bc0d07-354f-441c-bc7b-b1160c88303b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" Feb 01 07:35:42 crc kubenswrapper[4650]: I0201 07:35:42.994342 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b1bc0d07-354f-441c-bc7b-b1160c88303b-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd\" (UID: \"b1bc0d07-354f-441c-bc7b-b1160c88303b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" Feb 01 07:35:42 crc kubenswrapper[4650]: I0201 07:35:42.994557 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/b1bc0d07-354f-441c-bc7b-b1160c88303b-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd\" (UID: \"b1bc0d07-354f-441c-bc7b-b1160c88303b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" Feb 01 07:35:42 crc kubenswrapper[4650]: I0201 07:35:42.994948 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b1bc0d07-354f-441c-bc7b-b1160c88303b-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd\" (UID: \"b1bc0d07-354f-441c-bc7b-b1160c88303b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" Feb 01 07:35:43 crc kubenswrapper[4650]: I0201 07:35:43.018402 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhcxh\" (UniqueName: \"kubernetes.io/projected/b1bc0d07-354f-441c-bc7b-b1160c88303b-kube-api-access-mhcxh\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd\" (UID: \"b1bc0d07-354f-441c-bc7b-b1160c88303b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" Feb 01 07:35:43 crc kubenswrapper[4650]: I0201 07:35:43.143418 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" Feb 01 07:35:43 crc kubenswrapper[4650]: I0201 07:35:43.389065 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd"] Feb 01 07:35:43 crc kubenswrapper[4650]: I0201 07:35:43.937680 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-snf8v" podUID="2298718f-d9f4-4714-acbb-01739d0c7b62" containerName="console" containerID="cri-o://aadd71fedd3a995669472fc356dc47ffed709dcdf7f190b3f9fc0ed51ecf307c" gracePeriod=15 Feb 01 07:35:43 crc kubenswrapper[4650]: I0201 07:35:43.951014 4650 generic.go:334] "Generic (PLEG): container finished" podID="b1bc0d07-354f-441c-bc7b-b1160c88303b" containerID="f10a06f5dbba8d6d88246873f8f9c2c09a641a904f4facb00d09e889cfd875ee" exitCode=0 Feb 01 07:35:43 crc kubenswrapper[4650]: I0201 07:35:43.951101 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" event={"ID":"b1bc0d07-354f-441c-bc7b-b1160c88303b","Type":"ContainerDied","Data":"f10a06f5dbba8d6d88246873f8f9c2c09a641a904f4facb00d09e889cfd875ee"} Feb 01 07:35:43 crc kubenswrapper[4650]: I0201 07:35:43.951728 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" event={"ID":"b1bc0d07-354f-441c-bc7b-b1160c88303b","Type":"ContainerStarted","Data":"caab66662e273d62976a0d78c7e4ed6faaf2833bbbc8b25d53cb18411752ec13"} Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.362489 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-snf8v_2298718f-d9f4-4714-acbb-01739d0c7b62/console/0.log" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.362660 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.413197 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2298718f-d9f4-4714-acbb-01739d0c7b62-console-oauth-config\") pod \"2298718f-d9f4-4714-acbb-01739d0c7b62\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.414759 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-console-config\") pod \"2298718f-d9f4-4714-acbb-01739d0c7b62\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.414806 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-service-ca\") pod \"2298718f-d9f4-4714-acbb-01739d0c7b62\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.414889 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-trusted-ca-bundle\") pod \"2298718f-d9f4-4714-acbb-01739d0c7b62\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.414924 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-oauth-serving-cert\") pod \"2298718f-d9f4-4714-acbb-01739d0c7b62\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.414972 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndtd6\" (UniqueName: \"kubernetes.io/projected/2298718f-d9f4-4714-acbb-01739d0c7b62-kube-api-access-ndtd6\") pod \"2298718f-d9f4-4714-acbb-01739d0c7b62\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.415005 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2298718f-d9f4-4714-acbb-01739d0c7b62-console-serving-cert\") pod \"2298718f-d9f4-4714-acbb-01739d0c7b62\" (UID: \"2298718f-d9f4-4714-acbb-01739d0c7b62\") " Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.416548 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-console-config" (OuterVolumeSpecName: "console-config") pod "2298718f-d9f4-4714-acbb-01739d0c7b62" (UID: "2298718f-d9f4-4714-acbb-01739d0c7b62"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.416791 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-service-ca" (OuterVolumeSpecName: "service-ca") pod "2298718f-d9f4-4714-acbb-01739d0c7b62" (UID: "2298718f-d9f4-4714-acbb-01739d0c7b62"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.417457 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "2298718f-d9f4-4714-acbb-01739d0c7b62" (UID: "2298718f-d9f4-4714-acbb-01739d0c7b62"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.417715 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "2298718f-d9f4-4714-acbb-01739d0c7b62" (UID: "2298718f-d9f4-4714-acbb-01739d0c7b62"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.439763 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2298718f-d9f4-4714-acbb-01739d0c7b62-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "2298718f-d9f4-4714-acbb-01739d0c7b62" (UID: "2298718f-d9f4-4714-acbb-01739d0c7b62"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.439861 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2298718f-d9f4-4714-acbb-01739d0c7b62-kube-api-access-ndtd6" (OuterVolumeSpecName: "kube-api-access-ndtd6") pod "2298718f-d9f4-4714-acbb-01739d0c7b62" (UID: "2298718f-d9f4-4714-acbb-01739d0c7b62"). InnerVolumeSpecName "kube-api-access-ndtd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.442421 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2298718f-d9f4-4714-acbb-01739d0c7b62-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "2298718f-d9f4-4714-acbb-01739d0c7b62" (UID: "2298718f-d9f4-4714-acbb-01739d0c7b62"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.516583 4650 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.516611 4650 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.516620 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndtd6\" (UniqueName: \"kubernetes.io/projected/2298718f-d9f4-4714-acbb-01739d0c7b62-kube-api-access-ndtd6\") on node \"crc\" DevicePath \"\"" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.516630 4650 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2298718f-d9f4-4714-acbb-01739d0c7b62-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.516638 4650 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2298718f-d9f4-4714-acbb-01739d0c7b62-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.516646 4650 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-console-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.516654 4650 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2298718f-d9f4-4714-acbb-01739d0c7b62-service-ca\") on node \"crc\" DevicePath \"\"" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.960219 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-snf8v_2298718f-d9f4-4714-acbb-01739d0c7b62/console/0.log" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.960573 4650 generic.go:334] "Generic (PLEG): container finished" podID="2298718f-d9f4-4714-acbb-01739d0c7b62" containerID="aadd71fedd3a995669472fc356dc47ffed709dcdf7f190b3f9fc0ed51ecf307c" exitCode=2 Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.960603 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-snf8v" event={"ID":"2298718f-d9f4-4714-acbb-01739d0c7b62","Type":"ContainerDied","Data":"aadd71fedd3a995669472fc356dc47ffed709dcdf7f190b3f9fc0ed51ecf307c"} Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.960631 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-snf8v" event={"ID":"2298718f-d9f4-4714-acbb-01739d0c7b62","Type":"ContainerDied","Data":"8cf3c8759ed91762a8c0ae98690b5c75f66d56cf440cb7b927ac226006c6c09f"} Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.960672 4650 scope.go:117] "RemoveContainer" containerID="aadd71fedd3a995669472fc356dc47ffed709dcdf7f190b3f9fc0ed51ecf307c" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.960739 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-snf8v" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.984328 4650 scope.go:117] "RemoveContainer" containerID="aadd71fedd3a995669472fc356dc47ffed709dcdf7f190b3f9fc0ed51ecf307c" Feb 01 07:35:44 crc kubenswrapper[4650]: E0201 07:35:44.985544 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aadd71fedd3a995669472fc356dc47ffed709dcdf7f190b3f9fc0ed51ecf307c\": container with ID starting with aadd71fedd3a995669472fc356dc47ffed709dcdf7f190b3f9fc0ed51ecf307c not found: ID does not exist" containerID="aadd71fedd3a995669472fc356dc47ffed709dcdf7f190b3f9fc0ed51ecf307c" Feb 01 07:35:44 crc kubenswrapper[4650]: I0201 07:35:44.985663 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aadd71fedd3a995669472fc356dc47ffed709dcdf7f190b3f9fc0ed51ecf307c"} err="failed to get container status \"aadd71fedd3a995669472fc356dc47ffed709dcdf7f190b3f9fc0ed51ecf307c\": rpc error: code = NotFound desc = could not find container \"aadd71fedd3a995669472fc356dc47ffed709dcdf7f190b3f9fc0ed51ecf307c\": container with ID starting with aadd71fedd3a995669472fc356dc47ffed709dcdf7f190b3f9fc0ed51ecf307c not found: ID does not exist" Feb 01 07:35:45 crc kubenswrapper[4650]: I0201 07:35:45.003062 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-snf8v"] Feb 01 07:35:45 crc kubenswrapper[4650]: I0201 07:35:45.006493 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-snf8v"] Feb 01 07:35:45 crc kubenswrapper[4650]: I0201 07:35:45.972066 4650 generic.go:334] "Generic (PLEG): container finished" podID="b1bc0d07-354f-441c-bc7b-b1160c88303b" containerID="53aac28b9de9415bde8090fe59d602d04d372b7596cf078e7594659a8cc80a18" exitCode=0 Feb 01 07:35:45 crc kubenswrapper[4650]: I0201 07:35:45.980100 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2298718f-d9f4-4714-acbb-01739d0c7b62" path="/var/lib/kubelet/pods/2298718f-d9f4-4714-acbb-01739d0c7b62/volumes" Feb 01 07:35:45 crc kubenswrapper[4650]: I0201 07:35:45.981255 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" event={"ID":"b1bc0d07-354f-441c-bc7b-b1160c88303b","Type":"ContainerDied","Data":"53aac28b9de9415bde8090fe59d602d04d372b7596cf078e7594659a8cc80a18"} Feb 01 07:35:46 crc kubenswrapper[4650]: I0201 07:35:46.989161 4650 generic.go:334] "Generic (PLEG): container finished" podID="b1bc0d07-354f-441c-bc7b-b1160c88303b" containerID="685c882b509bc76c02fdba2817f252e3380a648f15a7419611c2d58cafbe3ef2" exitCode=0 Feb 01 07:35:46 crc kubenswrapper[4650]: I0201 07:35:46.989381 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" event={"ID":"b1bc0d07-354f-441c-bc7b-b1160c88303b","Type":"ContainerDied","Data":"685c882b509bc76c02fdba2817f252e3380a648f15a7419611c2d58cafbe3ef2"} Feb 01 07:35:48 crc kubenswrapper[4650]: I0201 07:35:48.250965 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" Feb 01 07:35:48 crc kubenswrapper[4650]: I0201 07:35:48.267643 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhcxh\" (UniqueName: \"kubernetes.io/projected/b1bc0d07-354f-441c-bc7b-b1160c88303b-kube-api-access-mhcxh\") pod \"b1bc0d07-354f-441c-bc7b-b1160c88303b\" (UID: \"b1bc0d07-354f-441c-bc7b-b1160c88303b\") " Feb 01 07:35:48 crc kubenswrapper[4650]: I0201 07:35:48.267762 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b1bc0d07-354f-441c-bc7b-b1160c88303b-util\") pod \"b1bc0d07-354f-441c-bc7b-b1160c88303b\" (UID: \"b1bc0d07-354f-441c-bc7b-b1160c88303b\") " Feb 01 07:35:48 crc kubenswrapper[4650]: I0201 07:35:48.267794 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b1bc0d07-354f-441c-bc7b-b1160c88303b-bundle\") pod \"b1bc0d07-354f-441c-bc7b-b1160c88303b\" (UID: \"b1bc0d07-354f-441c-bc7b-b1160c88303b\") " Feb 01 07:35:48 crc kubenswrapper[4650]: I0201 07:35:48.269101 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1bc0d07-354f-441c-bc7b-b1160c88303b-bundle" (OuterVolumeSpecName: "bundle") pod "b1bc0d07-354f-441c-bc7b-b1160c88303b" (UID: "b1bc0d07-354f-441c-bc7b-b1160c88303b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:35:48 crc kubenswrapper[4650]: I0201 07:35:48.277673 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1bc0d07-354f-441c-bc7b-b1160c88303b-kube-api-access-mhcxh" (OuterVolumeSpecName: "kube-api-access-mhcxh") pod "b1bc0d07-354f-441c-bc7b-b1160c88303b" (UID: "b1bc0d07-354f-441c-bc7b-b1160c88303b"). InnerVolumeSpecName "kube-api-access-mhcxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:35:48 crc kubenswrapper[4650]: I0201 07:35:48.286669 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1bc0d07-354f-441c-bc7b-b1160c88303b-util" (OuterVolumeSpecName: "util") pod "b1bc0d07-354f-441c-bc7b-b1160c88303b" (UID: "b1bc0d07-354f-441c-bc7b-b1160c88303b"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:35:48 crc kubenswrapper[4650]: I0201 07:35:48.369067 4650 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b1bc0d07-354f-441c-bc7b-b1160c88303b-util\") on node \"crc\" DevicePath \"\"" Feb 01 07:35:48 crc kubenswrapper[4650]: I0201 07:35:48.369115 4650 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b1bc0d07-354f-441c-bc7b-b1160c88303b-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:35:48 crc kubenswrapper[4650]: I0201 07:35:48.369129 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhcxh\" (UniqueName: \"kubernetes.io/projected/b1bc0d07-354f-441c-bc7b-b1160c88303b-kube-api-access-mhcxh\") on node \"crc\" DevicePath \"\"" Feb 01 07:35:49 crc kubenswrapper[4650]: I0201 07:35:49.007998 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" event={"ID":"b1bc0d07-354f-441c-bc7b-b1160c88303b","Type":"ContainerDied","Data":"caab66662e273d62976a0d78c7e4ed6faaf2833bbbc8b25d53cb18411752ec13"} Feb 01 07:35:49 crc kubenswrapper[4650]: I0201 07:35:49.008080 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="caab66662e273d62976a0d78c7e4ed6faaf2833bbbc8b25d53cb18411752ec13" Feb 01 07:35:49 crc kubenswrapper[4650]: I0201 07:35:49.008143 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd" Feb 01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.889184 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9"] Feb 01 07:35:57 crc kubenswrapper[4650]: E0201 07:35:57.890713 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1bc0d07-354f-441c-bc7b-b1160c88303b" containerName="util" Feb 01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.890804 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1bc0d07-354f-441c-bc7b-b1160c88303b" containerName="util" Feb 01 07:35:57 crc kubenswrapper[4650]: E0201 07:35:57.890895 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1bc0d07-354f-441c-bc7b-b1160c88303b" containerName="pull" Feb 01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.890971 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1bc0d07-354f-441c-bc7b-b1160c88303b" containerName="pull" Feb 01 07:35:57 crc kubenswrapper[4650]: E0201 07:35:57.891097 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2298718f-d9f4-4714-acbb-01739d0c7b62" containerName="console" Feb 01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.891167 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="2298718f-d9f4-4714-acbb-01739d0c7b62" containerName="console" Feb 01 07:35:57 crc kubenswrapper[4650]: E0201 07:35:57.891220 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1bc0d07-354f-441c-bc7b-b1160c88303b" containerName="extract" Feb 01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.891275 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1bc0d07-354f-441c-bc7b-b1160c88303b" containerName="extract" Feb 01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.891422 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="2298718f-d9f4-4714-acbb-01739d0c7b62" containerName="console" Feb 
01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.891479 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1bc0d07-354f-441c-bc7b-b1160c88303b" containerName="extract" Feb 01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.891940 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" Feb 01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.904233 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-bbbrp" Feb 01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.904478 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Feb 01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.905138 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Feb 01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.905638 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Feb 01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.905958 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Feb 01 07:35:57 crc kubenswrapper[4650]: I0201 07:35:57.907412 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9"] Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.003131 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c969ae59-bacb-4a3e-8849-11b28fcc5bb0-apiservice-cert\") pod \"metallb-operator-controller-manager-84bfdcb548-gw4d9\" (UID: \"c969ae59-bacb-4a3e-8849-11b28fcc5bb0\") " pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.003420 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pb79s\" (UniqueName: \"kubernetes.io/projected/c969ae59-bacb-4a3e-8849-11b28fcc5bb0-kube-api-access-pb79s\") pod \"metallb-operator-controller-manager-84bfdcb548-gw4d9\" (UID: \"c969ae59-bacb-4a3e-8849-11b28fcc5bb0\") " pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.003537 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c969ae59-bacb-4a3e-8849-11b28fcc5bb0-webhook-cert\") pod \"metallb-operator-controller-manager-84bfdcb548-gw4d9\" (UID: \"c969ae59-bacb-4a3e-8849-11b28fcc5bb0\") " pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.104233 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c969ae59-bacb-4a3e-8849-11b28fcc5bb0-apiservice-cert\") pod \"metallb-operator-controller-manager-84bfdcb548-gw4d9\" (UID: \"c969ae59-bacb-4a3e-8849-11b28fcc5bb0\") " pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.104305 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pb79s\" (UniqueName: 
\"kubernetes.io/projected/c969ae59-bacb-4a3e-8849-11b28fcc5bb0-kube-api-access-pb79s\") pod \"metallb-operator-controller-manager-84bfdcb548-gw4d9\" (UID: \"c969ae59-bacb-4a3e-8849-11b28fcc5bb0\") " pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.104335 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c969ae59-bacb-4a3e-8849-11b28fcc5bb0-webhook-cert\") pod \"metallb-operator-controller-manager-84bfdcb548-gw4d9\" (UID: \"c969ae59-bacb-4a3e-8849-11b28fcc5bb0\") " pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.112276 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c969ae59-bacb-4a3e-8849-11b28fcc5bb0-webhook-cert\") pod \"metallb-operator-controller-manager-84bfdcb548-gw4d9\" (UID: \"c969ae59-bacb-4a3e-8849-11b28fcc5bb0\") " pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.121992 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c969ae59-bacb-4a3e-8849-11b28fcc5bb0-apiservice-cert\") pod \"metallb-operator-controller-manager-84bfdcb548-gw4d9\" (UID: \"c969ae59-bacb-4a3e-8849-11b28fcc5bb0\") " pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.125920 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pb79s\" (UniqueName: \"kubernetes.io/projected/c969ae59-bacb-4a3e-8849-11b28fcc5bb0-kube-api-access-pb79s\") pod \"metallb-operator-controller-manager-84bfdcb548-gw4d9\" (UID: \"c969ae59-bacb-4a3e-8849-11b28fcc5bb0\") " pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.205269 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.287126 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8"] Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.287968 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.292139 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.292345 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.292447 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-8lwr4" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.306580 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e3570c01-0443-47c6-9b10-dc810c49a308-apiservice-cert\") pod \"metallb-operator-webhook-server-6dc7659c5c-9s5g8\" (UID: \"e3570c01-0443-47c6-9b10-dc810c49a308\") " pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.306852 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e3570c01-0443-47c6-9b10-dc810c49a308-webhook-cert\") pod \"metallb-operator-webhook-server-6dc7659c5c-9s5g8\" (UID: \"e3570c01-0443-47c6-9b10-dc810c49a308\") " pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.307348 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs5fs\" (UniqueName: \"kubernetes.io/projected/e3570c01-0443-47c6-9b10-dc810c49a308-kube-api-access-bs5fs\") pod \"metallb-operator-webhook-server-6dc7659c5c-9s5g8\" (UID: \"e3570c01-0443-47c6-9b10-dc810c49a308\") " pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.316447 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8"] Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.409772 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e3570c01-0443-47c6-9b10-dc810c49a308-apiservice-cert\") pod \"metallb-operator-webhook-server-6dc7659c5c-9s5g8\" (UID: \"e3570c01-0443-47c6-9b10-dc810c49a308\") " pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.409820 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e3570c01-0443-47c6-9b10-dc810c49a308-webhook-cert\") pod \"metallb-operator-webhook-server-6dc7659c5c-9s5g8\" (UID: \"e3570c01-0443-47c6-9b10-dc810c49a308\") " pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.409853 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs5fs\" (UniqueName: \"kubernetes.io/projected/e3570c01-0443-47c6-9b10-dc810c49a308-kube-api-access-bs5fs\") pod \"metallb-operator-webhook-server-6dc7659c5c-9s5g8\" (UID: \"e3570c01-0443-47c6-9b10-dc810c49a308\") " pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 
07:35:58.421502 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e3570c01-0443-47c6-9b10-dc810c49a308-apiservice-cert\") pod \"metallb-operator-webhook-server-6dc7659c5c-9s5g8\" (UID: \"e3570c01-0443-47c6-9b10-dc810c49a308\") " pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.422013 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e3570c01-0443-47c6-9b10-dc810c49a308-webhook-cert\") pod \"metallb-operator-webhook-server-6dc7659c5c-9s5g8\" (UID: \"e3570c01-0443-47c6-9b10-dc810c49a308\") " pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.437233 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs5fs\" (UniqueName: \"kubernetes.io/projected/e3570c01-0443-47c6-9b10-dc810c49a308-kube-api-access-bs5fs\") pod \"metallb-operator-webhook-server-6dc7659c5c-9s5g8\" (UID: \"e3570c01-0443-47c6-9b10-dc810c49a308\") " pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.543666 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9"] Feb 01 07:35:58 crc kubenswrapper[4650]: W0201 07:35:58.552430 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc969ae59_bacb_4a3e_8849_11b28fcc5bb0.slice/crio-bd2782247d5f2f8466b56ebe61775b548548208b23f322e26dc8454ab3d14afd WatchSource:0}: Error finding container bd2782247d5f2f8466b56ebe61775b548548208b23f322e26dc8454ab3d14afd: Status 404 returned error can't find the container with id bd2782247d5f2f8466b56ebe61775b548548208b23f322e26dc8454ab3d14afd Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.601868 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" Feb 01 07:35:58 crc kubenswrapper[4650]: I0201 07:35:58.915433 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8"] Feb 01 07:35:59 crc kubenswrapper[4650]: I0201 07:35:59.059809 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" event={"ID":"e3570c01-0443-47c6-9b10-dc810c49a308","Type":"ContainerStarted","Data":"f2c30cabb9c791cc72b341af0c9eb3a2f0a2180269cde16ae6ee436c0da75979"} Feb 01 07:35:59 crc kubenswrapper[4650]: I0201 07:35:59.061629 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" event={"ID":"c969ae59-bacb-4a3e-8849-11b28fcc5bb0","Type":"ContainerStarted","Data":"bd2782247d5f2f8466b56ebe61775b548548208b23f322e26dc8454ab3d14afd"} Feb 01 07:36:00 crc kubenswrapper[4650]: I0201 07:36:00.918205 4650 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Feb 01 07:36:02 crc kubenswrapper[4650]: I0201 07:36:02.083582 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" event={"ID":"c969ae59-bacb-4a3e-8849-11b28fcc5bb0","Type":"ContainerStarted","Data":"d711da1ea063f4ec068f338df747759d0722b950f1107d892ea8409590eb6b1d"} Feb 01 07:36:02 crc kubenswrapper[4650]: I0201 07:36:02.085229 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" Feb 01 07:36:02 crc kubenswrapper[4650]: I0201 07:36:02.118951 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" podStartSLOduration=1.869659721 podStartE2EDuration="5.118919977s" podCreationTimestamp="2026-02-01 07:35:57 +0000 UTC" firstStartedPulling="2026-02-01 07:35:58.555813345 +0000 UTC m=+757.278911590" lastFinishedPulling="2026-02-01 07:36:01.805073601 +0000 UTC m=+760.528171846" observedRunningTime="2026-02-01 07:36:02.114628748 +0000 UTC m=+760.837726993" watchObservedRunningTime="2026-02-01 07:36:02.118919977 +0000 UTC m=+760.842018222" Feb 01 07:36:05 crc kubenswrapper[4650]: I0201 07:36:05.099501 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" event={"ID":"e3570c01-0443-47c6-9b10-dc810c49a308","Type":"ContainerStarted","Data":"5d6fc68cd498487d1c02a42ea7b5a28ee10710ec8ed9364b1bc2639954196a85"} Feb 01 07:36:05 crc kubenswrapper[4650]: I0201 07:36:05.101049 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" Feb 01 07:36:05 crc kubenswrapper[4650]: I0201 07:36:05.139262 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" podStartSLOduration=1.6416514100000001 podStartE2EDuration="7.13924771s" podCreationTimestamp="2026-02-01 07:35:58 +0000 UTC" firstStartedPulling="2026-02-01 07:35:58.930343559 +0000 UTC m=+757.653441804" lastFinishedPulling="2026-02-01 07:36:04.427939859 +0000 UTC m=+763.151038104" observedRunningTime="2026-02-01 07:36:05.138121311 +0000 UTC m=+763.861219556" watchObservedRunningTime="2026-02-01 07:36:05.13924771 +0000 UTC 
m=+763.862345955" Feb 01 07:36:07 crc kubenswrapper[4650]: I0201 07:36:07.161426 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:36:07 crc kubenswrapper[4650]: I0201 07:36:07.161530 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:36:07 crc kubenswrapper[4650]: I0201 07:36:07.161591 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:36:07 crc kubenswrapper[4650]: I0201 07:36:07.162496 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7e65f3cb8796ac73cb10f7e5fd38e53569d05a3301373f4ee87f64447301307a"} pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 01 07:36:07 crc kubenswrapper[4650]: I0201 07:36:07.162589 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" containerID="cri-o://7e65f3cb8796ac73cb10f7e5fd38e53569d05a3301373f4ee87f64447301307a" gracePeriod=600 Feb 01 07:36:08 crc kubenswrapper[4650]: I0201 07:36:08.117850 4650 generic.go:334] "Generic (PLEG): container finished" podID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerID="7e65f3cb8796ac73cb10f7e5fd38e53569d05a3301373f4ee87f64447301307a" exitCode=0 Feb 01 07:36:08 crc kubenswrapper[4650]: I0201 07:36:08.118443 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerDied","Data":"7e65f3cb8796ac73cb10f7e5fd38e53569d05a3301373f4ee87f64447301307a"} Feb 01 07:36:08 crc kubenswrapper[4650]: I0201 07:36:08.118470 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"a4559927c25b5172e0bb51589b156030237e4552bdad01ea0a510262dabc0be0"} Feb 01 07:36:08 crc kubenswrapper[4650]: I0201 07:36:08.118484 4650 scope.go:117] "RemoveContainer" containerID="e3b794505f123da84dff50f9a8a52e0b394a3a5d8569c0bb7517422e3f2965d9" Feb 01 07:36:18 crc kubenswrapper[4650]: I0201 07:36:18.608800 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-6dc7659c5c-9s5g8" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.208799 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-84bfdcb548-gw4d9" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.886263 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-dk5xx"] Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.889380 4650 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.892203 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.892655 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-5vrvt" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.893344 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.899068 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8"] Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.900207 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.903549 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.921671 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8"] Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.994206 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/cf9c9412-f2f2-490f-ab66-3d2bd543d519-frr-sockets\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.994252 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qh7l\" (UniqueName: \"kubernetes.io/projected/ef83d5c0-5353-4e94-854d-e34141ac2982-kube-api-access-9qh7l\") pod \"frr-k8s-webhook-server-7df86c4f6c-vcsl8\" (UID: \"ef83d5c0-5353-4e94-854d-e34141ac2982\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.994280 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/cf9c9412-f2f2-490f-ab66-3d2bd543d519-frr-conf\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.994311 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/cf9c9412-f2f2-490f-ab66-3d2bd543d519-reloader\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.994334 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cf9c9412-f2f2-490f-ab66-3d2bd543d519-metrics-certs\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.994383 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wf42l\" (UniqueName: 
\"kubernetes.io/projected/cf9c9412-f2f2-490f-ab66-3d2bd543d519-kube-api-access-wf42l\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.994404 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ef83d5c0-5353-4e94-854d-e34141ac2982-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-vcsl8\" (UID: \"ef83d5c0-5353-4e94-854d-e34141ac2982\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.994429 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/cf9c9412-f2f2-490f-ab66-3d2bd543d519-metrics\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:38 crc kubenswrapper[4650]: I0201 07:36:38.994443 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/cf9c9412-f2f2-490f-ab66-3d2bd543d519-frr-startup\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.027803 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-d8c5z"] Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.033597 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.036191 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.036364 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.036541 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.036683 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-d89s2" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.051224 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-27zgr"] Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.053240 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-27zgr" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.056158 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.072953 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-27zgr"] Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096153 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xb6cn\" (UniqueName: \"kubernetes.io/projected/5cd9c5b4-5653-49aa-8219-21fa9cdabeca-kube-api-access-xb6cn\") pod \"controller-6968d8fdc4-27zgr\" (UID: \"5cd9c5b4-5653-49aa-8219-21fa9cdabeca\") " pod="metallb-system/controller-6968d8fdc4-27zgr" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096236 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/cf9c9412-f2f2-490f-ab66-3d2bd543d519-frr-sockets\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096262 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qh7l\" (UniqueName: \"kubernetes.io/projected/ef83d5c0-5353-4e94-854d-e34141ac2982-kube-api-access-9qh7l\") pod \"frr-k8s-webhook-server-7df86c4f6c-vcsl8\" (UID: \"ef83d5c0-5353-4e94-854d-e34141ac2982\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096298 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-memberlist\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096325 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/cf9c9412-f2f2-490f-ab66-3d2bd543d519-frr-conf\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096353 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/cf9c9412-f2f2-490f-ab66-3d2bd543d519-reloader\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096381 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cf9c9412-f2f2-490f-ab66-3d2bd543d519-metrics-certs\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096417 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/80fd8481-986f-4374-8c23-7da080041285-metallb-excludel2\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096456 4650 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-wf42l\" (UniqueName: \"kubernetes.io/projected/cf9c9412-f2f2-490f-ab66-3d2bd543d519-kube-api-access-wf42l\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096481 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5cd9c5b4-5653-49aa-8219-21fa9cdabeca-cert\") pod \"controller-6968d8fdc4-27zgr\" (UID: \"5cd9c5b4-5653-49aa-8219-21fa9cdabeca\") " pod="metallb-system/controller-6968d8fdc4-27zgr" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096504 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ef83d5c0-5353-4e94-854d-e34141ac2982-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-vcsl8\" (UID: \"ef83d5c0-5353-4e94-854d-e34141ac2982\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096531 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-metrics-certs\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096560 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jmfc\" (UniqueName: \"kubernetes.io/projected/80fd8481-986f-4374-8c23-7da080041285-kube-api-access-8jmfc\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096599 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/cf9c9412-f2f2-490f-ab66-3d2bd543d519-metrics\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096619 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5cd9c5b4-5653-49aa-8219-21fa9cdabeca-metrics-certs\") pod \"controller-6968d8fdc4-27zgr\" (UID: \"5cd9c5b4-5653-49aa-8219-21fa9cdabeca\") " pod="metallb-system/controller-6968d8fdc4-27zgr" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.096659 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/cf9c9412-f2f2-490f-ab66-3d2bd543d519-frr-startup\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.097839 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/cf9c9412-f2f2-490f-ab66-3d2bd543d519-frr-startup\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.098175 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/cf9c9412-f2f2-490f-ab66-3d2bd543d519-frr-sockets\") pod 
\"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.098639 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/cf9c9412-f2f2-490f-ab66-3d2bd543d519-frr-conf\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.098824 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/cf9c9412-f2f2-490f-ab66-3d2bd543d519-reloader\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: E0201 07:36:39.098891 4650 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Feb 01 07:36:39 crc kubenswrapper[4650]: E0201 07:36:39.098930 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cf9c9412-f2f2-490f-ab66-3d2bd543d519-metrics-certs podName:cf9c9412-f2f2-490f-ab66-3d2bd543d519 nodeName:}" failed. No retries permitted until 2026-02-01 07:36:39.59891645 +0000 UTC m=+798.322014685 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cf9c9412-f2f2-490f-ab66-3d2bd543d519-metrics-certs") pod "frr-k8s-dk5xx" (UID: "cf9c9412-f2f2-490f-ab66-3d2bd543d519") : secret "frr-k8s-certs-secret" not found Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.099502 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/cf9c9412-f2f2-490f-ab66-3d2bd543d519-metrics\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.114718 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qh7l\" (UniqueName: \"kubernetes.io/projected/ef83d5c0-5353-4e94-854d-e34141ac2982-kube-api-access-9qh7l\") pod \"frr-k8s-webhook-server-7df86c4f6c-vcsl8\" (UID: \"ef83d5c0-5353-4e94-854d-e34141ac2982\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.123052 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wf42l\" (UniqueName: \"kubernetes.io/projected/cf9c9412-f2f2-490f-ab66-3d2bd543d519-kube-api-access-wf42l\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.125763 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ef83d5c0-5353-4e94-854d-e34141ac2982-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-vcsl8\" (UID: \"ef83d5c0-5353-4e94-854d-e34141ac2982\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.198627 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-memberlist\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.198735 4650 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/80fd8481-986f-4374-8c23-7da080041285-metallb-excludel2\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.198773 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5cd9c5b4-5653-49aa-8219-21fa9cdabeca-cert\") pod \"controller-6968d8fdc4-27zgr\" (UID: \"5cd9c5b4-5653-49aa-8219-21fa9cdabeca\") " pod="metallb-system/controller-6968d8fdc4-27zgr" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.198796 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-metrics-certs\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.198820 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jmfc\" (UniqueName: \"kubernetes.io/projected/80fd8481-986f-4374-8c23-7da080041285-kube-api-access-8jmfc\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.198840 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5cd9c5b4-5653-49aa-8219-21fa9cdabeca-metrics-certs\") pod \"controller-6968d8fdc4-27zgr\" (UID: \"5cd9c5b4-5653-49aa-8219-21fa9cdabeca\") " pod="metallb-system/controller-6968d8fdc4-27zgr" Feb 01 07:36:39 crc kubenswrapper[4650]: E0201 07:36:39.198862 4650 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Feb 01 07:36:39 crc kubenswrapper[4650]: E0201 07:36:39.198972 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-memberlist podName:80fd8481-986f-4374-8c23-7da080041285 nodeName:}" failed. No retries permitted until 2026-02-01 07:36:39.69894239 +0000 UTC m=+798.422040645 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-memberlist") pod "speaker-d8c5z" (UID: "80fd8481-986f-4374-8c23-7da080041285") : secret "metallb-memberlist" not found Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.198884 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xb6cn\" (UniqueName: \"kubernetes.io/projected/5cd9c5b4-5653-49aa-8219-21fa9cdabeca-kube-api-access-xb6cn\") pod \"controller-6968d8fdc4-27zgr\" (UID: \"5cd9c5b4-5653-49aa-8219-21fa9cdabeca\") " pod="metallb-system/controller-6968d8fdc4-27zgr" Feb 01 07:36:39 crc kubenswrapper[4650]: E0201 07:36:39.199335 4650 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Feb 01 07:36:39 crc kubenswrapper[4650]: E0201 07:36:39.199472 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-metrics-certs podName:80fd8481-986f-4374-8c23-7da080041285 nodeName:}" failed. No retries permitted until 2026-02-01 07:36:39.699453123 +0000 UTC m=+798.422551368 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-metrics-certs") pod "speaker-d8c5z" (UID: "80fd8481-986f-4374-8c23-7da080041285") : secret "speaker-certs-secret" not found Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.200097 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/80fd8481-986f-4374-8c23-7da080041285-metallb-excludel2\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.202959 4650 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.204540 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5cd9c5b4-5653-49aa-8219-21fa9cdabeca-metrics-certs\") pod \"controller-6968d8fdc4-27zgr\" (UID: \"5cd9c5b4-5653-49aa-8219-21fa9cdabeca\") " pod="metallb-system/controller-6968d8fdc4-27zgr" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.214389 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5cd9c5b4-5653-49aa-8219-21fa9cdabeca-cert\") pod \"controller-6968d8fdc4-27zgr\" (UID: \"5cd9c5b4-5653-49aa-8219-21fa9cdabeca\") " pod="metallb-system/controller-6968d8fdc4-27zgr" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.219435 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xb6cn\" (UniqueName: \"kubernetes.io/projected/5cd9c5b4-5653-49aa-8219-21fa9cdabeca-kube-api-access-xb6cn\") pod \"controller-6968d8fdc4-27zgr\" (UID: \"5cd9c5b4-5653-49aa-8219-21fa9cdabeca\") " pod="metallb-system/controller-6968d8fdc4-27zgr" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.223416 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.224832 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jmfc\" (UniqueName: \"kubernetes.io/projected/80fd8481-986f-4374-8c23-7da080041285-kube-api-access-8jmfc\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.379883 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-27zgr" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.514899 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8"] Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.609662 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cf9c9412-f2f2-490f-ab66-3d2bd543d519-metrics-certs\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.614196 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cf9c9412-f2f2-490f-ab66-3d2bd543d519-metrics-certs\") pod \"frr-k8s-dk5xx\" (UID: \"cf9c9412-f2f2-490f-ab66-3d2bd543d519\") " pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.710755 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-memberlist\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.710829 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-metrics-certs\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: E0201 07:36:39.710941 4650 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Feb 01 07:36:39 crc kubenswrapper[4650]: E0201 07:36:39.711062 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-memberlist podName:80fd8481-986f-4374-8c23-7da080041285 nodeName:}" failed. No retries permitted until 2026-02-01 07:36:40.711019901 +0000 UTC m=+799.434118136 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-memberlist") pod "speaker-d8c5z" (UID: "80fd8481-986f-4374-8c23-7da080041285") : secret "metallb-memberlist" not found Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.714564 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-metrics-certs\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.753324 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-27zgr"] Feb 01 07:36:39 crc kubenswrapper[4650]: W0201 07:36:39.758932 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5cd9c5b4_5653_49aa_8219_21fa9cdabeca.slice/crio-5253d846b452245942c66b43ee44d73f33bc455f381282b18fe8247791483717 WatchSource:0}: Error finding container 5253d846b452245942c66b43ee44d73f33bc455f381282b18fe8247791483717: Status 404 returned error can't find the container with id 5253d846b452245942c66b43ee44d73f33bc455f381282b18fe8247791483717 Feb 01 07:36:39 crc kubenswrapper[4650]: I0201 07:36:39.818201 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:40 crc kubenswrapper[4650]: I0201 07:36:40.326414 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dk5xx" event={"ID":"cf9c9412-f2f2-490f-ab66-3d2bd543d519","Type":"ContainerStarted","Data":"4fded09a2564e512d831eb3978fca859a1eb9fded53e7ff1578cdecb85702689"} Feb 01 07:36:40 crc kubenswrapper[4650]: I0201 07:36:40.330357 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-27zgr" event={"ID":"5cd9c5b4-5653-49aa-8219-21fa9cdabeca","Type":"ContainerStarted","Data":"8cd6b2b33a5a14bb592cd0d1a929f2ae30b5efa93124da5e1e75f1c116e13daa"} Feb 01 07:36:40 crc kubenswrapper[4650]: I0201 07:36:40.330384 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-27zgr" event={"ID":"5cd9c5b4-5653-49aa-8219-21fa9cdabeca","Type":"ContainerStarted","Data":"984412d395b1f6afbd0eee9c3997e79c23420b830b30175e1d641f52010429f2"} Feb 01 07:36:40 crc kubenswrapper[4650]: I0201 07:36:40.330392 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-27zgr" event={"ID":"5cd9c5b4-5653-49aa-8219-21fa9cdabeca","Type":"ContainerStarted","Data":"5253d846b452245942c66b43ee44d73f33bc455f381282b18fe8247791483717"} Feb 01 07:36:40 crc kubenswrapper[4650]: I0201 07:36:40.331130 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-27zgr" Feb 01 07:36:40 crc kubenswrapper[4650]: I0201 07:36:40.332449 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8" event={"ID":"ef83d5c0-5353-4e94-854d-e34141ac2982","Type":"ContainerStarted","Data":"579052474b1e36d223db2ea0dab94c7741bd04daded85670bdffa132b0561ca3"} Feb 01 07:36:40 crc kubenswrapper[4650]: I0201 07:36:40.350860 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-27zgr" podStartSLOduration=1.350843666 podStartE2EDuration="1.350843666s" podCreationTimestamp="2026-02-01 07:36:39 +0000 
UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:36:40.34944138 +0000 UTC m=+799.072539625" watchObservedRunningTime="2026-02-01 07:36:40.350843666 +0000 UTC m=+799.073941901" Feb 01 07:36:40 crc kubenswrapper[4650]: I0201 07:36:40.726587 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-memberlist\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:40 crc kubenswrapper[4650]: I0201 07:36:40.750908 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/80fd8481-986f-4374-8c23-7da080041285-memberlist\") pod \"speaker-d8c5z\" (UID: \"80fd8481-986f-4374-8c23-7da080041285\") " pod="metallb-system/speaker-d8c5z" Feb 01 07:36:40 crc kubenswrapper[4650]: I0201 07:36:40.855106 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-d8c5z" Feb 01 07:36:40 crc kubenswrapper[4650]: W0201 07:36:40.886284 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80fd8481_986f_4374_8c23_7da080041285.slice/crio-80047e940b7303d3475eae16c9fafb8a34fc2816dca155f1954acd36980e03d6 WatchSource:0}: Error finding container 80047e940b7303d3475eae16c9fafb8a34fc2816dca155f1954acd36980e03d6: Status 404 returned error can't find the container with id 80047e940b7303d3475eae16c9fafb8a34fc2816dca155f1954acd36980e03d6 Feb 01 07:36:41 crc kubenswrapper[4650]: I0201 07:36:41.347303 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-d8c5z" event={"ID":"80fd8481-986f-4374-8c23-7da080041285","Type":"ContainerStarted","Data":"0dd5ac0bb74e32e8b7903f77d9ac37b3b5251c729b8b0f48ea2a8c5526ebfb9c"} Feb 01 07:36:41 crc kubenswrapper[4650]: I0201 07:36:41.347356 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-d8c5z" event={"ID":"80fd8481-986f-4374-8c23-7da080041285","Type":"ContainerStarted","Data":"80047e940b7303d3475eae16c9fafb8a34fc2816dca155f1954acd36980e03d6"} Feb 01 07:36:42 crc kubenswrapper[4650]: I0201 07:36:42.373512 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-d8c5z" event={"ID":"80fd8481-986f-4374-8c23-7da080041285","Type":"ContainerStarted","Data":"42ee060a376a2bac27bb7d9d18081271c00d864eb45bcdc323cec11fc8411b1b"} Feb 01 07:36:42 crc kubenswrapper[4650]: I0201 07:36:42.373914 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-d8c5z" Feb 01 07:36:42 crc kubenswrapper[4650]: I0201 07:36:42.399553 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-d8c5z" podStartSLOduration=3.3995366000000002 podStartE2EDuration="3.3995366s" podCreationTimestamp="2026-02-01 07:36:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:36:42.398501653 +0000 UTC m=+801.121599898" watchObservedRunningTime="2026-02-01 07:36:42.3995366 +0000 UTC m=+801.122634845" Feb 01 07:36:48 crc kubenswrapper[4650]: I0201 07:36:48.414797 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8" 
event={"ID":"ef83d5c0-5353-4e94-854d-e34141ac2982","Type":"ContainerStarted","Data":"f7b2c0c5695871d39411dd7ac692f93a896162b4c8d2bc9830981f556957a7bd"} Feb 01 07:36:48 crc kubenswrapper[4650]: I0201 07:36:48.415378 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8" Feb 01 07:36:48 crc kubenswrapper[4650]: I0201 07:36:48.417158 4650 generic.go:334] "Generic (PLEG): container finished" podID="cf9c9412-f2f2-490f-ab66-3d2bd543d519" containerID="38b2d8bfd230504c3f8219313a4ea4166d212033ed9e45ffd2aea5c87e1a9b9b" exitCode=0 Feb 01 07:36:48 crc kubenswrapper[4650]: I0201 07:36:48.417219 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dk5xx" event={"ID":"cf9c9412-f2f2-490f-ab66-3d2bd543d519","Type":"ContainerDied","Data":"38b2d8bfd230504c3f8219313a4ea4166d212033ed9e45ffd2aea5c87e1a9b9b"} Feb 01 07:36:48 crc kubenswrapper[4650]: I0201 07:36:48.441171 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8" podStartSLOduration=2.469362371 podStartE2EDuration="10.441142183s" podCreationTimestamp="2026-02-01 07:36:38 +0000 UTC" firstStartedPulling="2026-02-01 07:36:39.543475493 +0000 UTC m=+798.266573738" lastFinishedPulling="2026-02-01 07:36:47.515255305 +0000 UTC m=+806.238353550" observedRunningTime="2026-02-01 07:36:48.431309946 +0000 UTC m=+807.154408201" watchObservedRunningTime="2026-02-01 07:36:48.441142183 +0000 UTC m=+807.164240508" Feb 01 07:36:49 crc kubenswrapper[4650]: I0201 07:36:49.386597 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-27zgr" Feb 01 07:36:49 crc kubenswrapper[4650]: I0201 07:36:49.427591 4650 generic.go:334] "Generic (PLEG): container finished" podID="cf9c9412-f2f2-490f-ab66-3d2bd543d519" containerID="1f966cb3f4472523c4b03824069f6ef7f124c1c99581dffc6999ea6564af4096" exitCode=0 Feb 01 07:36:49 crc kubenswrapper[4650]: I0201 07:36:49.427654 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dk5xx" event={"ID":"cf9c9412-f2f2-490f-ab66-3d2bd543d519","Type":"ContainerDied","Data":"1f966cb3f4472523c4b03824069f6ef7f124c1c99581dffc6999ea6564af4096"} Feb 01 07:36:50 crc kubenswrapper[4650]: I0201 07:36:50.435878 4650 generic.go:334] "Generic (PLEG): container finished" podID="cf9c9412-f2f2-490f-ab66-3d2bd543d519" containerID="d1cc0ffec17885256375e42d0ee3395c486be637862fbb876b1f3d191da3273a" exitCode=0 Feb 01 07:36:50 crc kubenswrapper[4650]: I0201 07:36:50.435923 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dk5xx" event={"ID":"cf9c9412-f2f2-490f-ab66-3d2bd543d519","Type":"ContainerDied","Data":"d1cc0ffec17885256375e42d0ee3395c486be637862fbb876b1f3d191da3273a"} Feb 01 07:36:51 crc kubenswrapper[4650]: I0201 07:36:51.447720 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dk5xx" event={"ID":"cf9c9412-f2f2-490f-ab66-3d2bd543d519","Type":"ContainerStarted","Data":"e9358be82fc72be38394ed5da4239360c1fb042e7c02b9594ae9e2fb59dc962c"} Feb 01 07:36:51 crc kubenswrapper[4650]: I0201 07:36:51.448389 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dk5xx" event={"ID":"cf9c9412-f2f2-490f-ab66-3d2bd543d519","Type":"ContainerStarted","Data":"d597a4cdc660a1c54f92760ed28be6025b1a74984b50891c36d9f2751467ada4"} Feb 01 07:36:51 crc kubenswrapper[4650]: I0201 07:36:51.448406 4650 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="metallb-system/frr-k8s-dk5xx" event={"ID":"cf9c9412-f2f2-490f-ab66-3d2bd543d519","Type":"ContainerStarted","Data":"17b39cc8e7ede9a4745412412441ba02edeeb0ee46dce9b12c565533c7e81d90"} Feb 01 07:36:51 crc kubenswrapper[4650]: I0201 07:36:51.448417 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dk5xx" event={"ID":"cf9c9412-f2f2-490f-ab66-3d2bd543d519","Type":"ContainerStarted","Data":"8ca14dddb04bac0542d710f278f5c5917e9b0447152e19f2012a172892d960e6"} Feb 01 07:36:51 crc kubenswrapper[4650]: I0201 07:36:51.448429 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dk5xx" event={"ID":"cf9c9412-f2f2-490f-ab66-3d2bd543d519","Type":"ContainerStarted","Data":"7ea53756f435861d164568b63ee0727fe3be3f4fe27e3afec192923610e15e99"} Feb 01 07:36:52 crc kubenswrapper[4650]: I0201 07:36:52.461211 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dk5xx" event={"ID":"cf9c9412-f2f2-490f-ab66-3d2bd543d519","Type":"ContainerStarted","Data":"08552b8c8fe351c93def0b461062d03054a5e71f229c530c92f3c8e486e68e0d"} Feb 01 07:36:52 crc kubenswrapper[4650]: I0201 07:36:52.462270 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:52 crc kubenswrapper[4650]: I0201 07:36:52.518696 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-dk5xx" podStartSLOduration=6.895023403 podStartE2EDuration="14.518670088s" podCreationTimestamp="2026-02-01 07:36:38 +0000 UTC" firstStartedPulling="2026-02-01 07:36:39.909001556 +0000 UTC m=+798.632099801" lastFinishedPulling="2026-02-01 07:36:47.532648241 +0000 UTC m=+806.255746486" observedRunningTime="2026-02-01 07:36:52.516536412 +0000 UTC m=+811.239634737" watchObservedRunningTime="2026-02-01 07:36:52.518670088 +0000 UTC m=+811.241768373" Feb 01 07:36:54 crc kubenswrapper[4650]: I0201 07:36:54.819313 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:54 crc kubenswrapper[4650]: I0201 07:36:54.982620 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:36:59 crc kubenswrapper[4650]: I0201 07:36:59.230628 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcsl8" Feb 01 07:37:00 crc kubenswrapper[4650]: I0201 07:37:00.861995 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-d8c5z" Feb 01 07:37:03 crc kubenswrapper[4650]: I0201 07:37:03.614885 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-48qdx"] Feb 01 07:37:03 crc kubenswrapper[4650]: I0201 07:37:03.615737 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-48qdx" Feb 01 07:37:03 crc kubenswrapper[4650]: I0201 07:37:03.620510 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-h2zcx" Feb 01 07:37:03 crc kubenswrapper[4650]: I0201 07:37:03.621738 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Feb 01 07:37:03 crc kubenswrapper[4650]: I0201 07:37:03.625577 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Feb 01 07:37:03 crc kubenswrapper[4650]: I0201 07:37:03.635626 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-48qdx"] Feb 01 07:37:03 crc kubenswrapper[4650]: I0201 07:37:03.679390 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6bk2\" (UniqueName: \"kubernetes.io/projected/282547c2-b668-4534-9550-1e0ac2a064b9-kube-api-access-h6bk2\") pod \"openstack-operator-index-48qdx\" (UID: \"282547c2-b668-4534-9550-1e0ac2a064b9\") " pod="openstack-operators/openstack-operator-index-48qdx" Feb 01 07:37:03 crc kubenswrapper[4650]: I0201 07:37:03.780570 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6bk2\" (UniqueName: \"kubernetes.io/projected/282547c2-b668-4534-9550-1e0ac2a064b9-kube-api-access-h6bk2\") pod \"openstack-operator-index-48qdx\" (UID: \"282547c2-b668-4534-9550-1e0ac2a064b9\") " pod="openstack-operators/openstack-operator-index-48qdx" Feb 01 07:37:03 crc kubenswrapper[4650]: I0201 07:37:03.805840 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6bk2\" (UniqueName: \"kubernetes.io/projected/282547c2-b668-4534-9550-1e0ac2a064b9-kube-api-access-h6bk2\") pod \"openstack-operator-index-48qdx\" (UID: \"282547c2-b668-4534-9550-1e0ac2a064b9\") " pod="openstack-operators/openstack-operator-index-48qdx" Feb 01 07:37:03 crc kubenswrapper[4650]: I0201 07:37:03.936437 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-48qdx" Feb 01 07:37:04 crc kubenswrapper[4650]: I0201 07:37:04.516502 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-48qdx"] Feb 01 07:37:04 crc kubenswrapper[4650]: I0201 07:37:04.563981 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-48qdx" event={"ID":"282547c2-b668-4534-9550-1e0ac2a064b9","Type":"ContainerStarted","Data":"f546fbb49188579cd42324ce904fbe939890d07d8b325adb9d95c668355b3d96"} Feb 01 07:37:05 crc kubenswrapper[4650]: I0201 07:37:05.982878 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-48qdx"] Feb 01 07:37:06 crc kubenswrapper[4650]: I0201 07:37:06.390002 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-wj6vn"] Feb 01 07:37:06 crc kubenswrapper[4650]: I0201 07:37:06.392631 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-wj6vn" Feb 01 07:37:06 crc kubenswrapper[4650]: I0201 07:37:06.400467 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-wj6vn"] Feb 01 07:37:06 crc kubenswrapper[4650]: I0201 07:37:06.531912 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2bxf\" (UniqueName: \"kubernetes.io/projected/f8c846e9-c1e1-4745-8496-931667a06ca0-kube-api-access-t2bxf\") pod \"openstack-operator-index-wj6vn\" (UID: \"f8c846e9-c1e1-4745-8496-931667a06ca0\") " pod="openstack-operators/openstack-operator-index-wj6vn" Feb 01 07:37:06 crc kubenswrapper[4650]: I0201 07:37:06.632738 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2bxf\" (UniqueName: \"kubernetes.io/projected/f8c846e9-c1e1-4745-8496-931667a06ca0-kube-api-access-t2bxf\") pod \"openstack-operator-index-wj6vn\" (UID: \"f8c846e9-c1e1-4745-8496-931667a06ca0\") " pod="openstack-operators/openstack-operator-index-wj6vn" Feb 01 07:37:06 crc kubenswrapper[4650]: I0201 07:37:06.650828 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2bxf\" (UniqueName: \"kubernetes.io/projected/f8c846e9-c1e1-4745-8496-931667a06ca0-kube-api-access-t2bxf\") pod \"openstack-operator-index-wj6vn\" (UID: \"f8c846e9-c1e1-4745-8496-931667a06ca0\") " pod="openstack-operators/openstack-operator-index-wj6vn" Feb 01 07:37:06 crc kubenswrapper[4650]: I0201 07:37:06.748589 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-wj6vn" Feb 01 07:37:07 crc kubenswrapper[4650]: I0201 07:37:07.041647 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-wj6vn"] Feb 01 07:37:07 crc kubenswrapper[4650]: W0201 07:37:07.059384 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf8c846e9_c1e1_4745_8496_931667a06ca0.slice/crio-1f23aaecc71e72b6ce12220abab80e7b6d244d464c033466555fd6be27761893 WatchSource:0}: Error finding container 1f23aaecc71e72b6ce12220abab80e7b6d244d464c033466555fd6be27761893: Status 404 returned error can't find the container with id 1f23aaecc71e72b6ce12220abab80e7b6d244d464c033466555fd6be27761893 Feb 01 07:37:07 crc kubenswrapper[4650]: I0201 07:37:07.586711 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-wj6vn" event={"ID":"f8c846e9-c1e1-4745-8496-931667a06ca0","Type":"ContainerStarted","Data":"5f2774e50c4e9af1101a241b64df16eccc591980d19c9341b5d4d004a2a31f58"} Feb 01 07:37:07 crc kubenswrapper[4650]: I0201 07:37:07.587187 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-wj6vn" event={"ID":"f8c846e9-c1e1-4745-8496-931667a06ca0","Type":"ContainerStarted","Data":"1f23aaecc71e72b6ce12220abab80e7b6d244d464c033466555fd6be27761893"} Feb 01 07:37:07 crc kubenswrapper[4650]: I0201 07:37:07.589718 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-48qdx" event={"ID":"282547c2-b668-4534-9550-1e0ac2a064b9","Type":"ContainerStarted","Data":"dec5a655215381373795b3fb79e5fa74f196974f3937d009c767eeb23d632970"} Feb 01 07:37:07 crc kubenswrapper[4650]: I0201 07:37:07.589865 4650 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack-operators/openstack-operator-index-48qdx" podUID="282547c2-b668-4534-9550-1e0ac2a064b9" containerName="registry-server" containerID="cri-o://dec5a655215381373795b3fb79e5fa74f196974f3937d009c767eeb23d632970" gracePeriod=2 Feb 01 07:37:07 crc kubenswrapper[4650]: I0201 07:37:07.605281 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-wj6vn" podStartSLOduration=1.529913352 podStartE2EDuration="1.605256935s" podCreationTimestamp="2026-02-01 07:37:06 +0000 UTC" firstStartedPulling="2026-02-01 07:37:07.06282943 +0000 UTC m=+825.785927675" lastFinishedPulling="2026-02-01 07:37:07.138173003 +0000 UTC m=+825.861271258" observedRunningTime="2026-02-01 07:37:07.604973628 +0000 UTC m=+826.328071923" watchObservedRunningTime="2026-02-01 07:37:07.605256935 +0000 UTC m=+826.328355240" Feb 01 07:37:07 crc kubenswrapper[4650]: I0201 07:37:07.631267 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-48qdx" podStartSLOduration=2.290831383 podStartE2EDuration="4.631243426s" podCreationTimestamp="2026-02-01 07:37:03 +0000 UTC" firstStartedPulling="2026-02-01 07:37:04.534161567 +0000 UTC m=+823.257259812" lastFinishedPulling="2026-02-01 07:37:06.87457359 +0000 UTC m=+825.597671855" observedRunningTime="2026-02-01 07:37:07.627600701 +0000 UTC m=+826.350698976" watchObservedRunningTime="2026-02-01 07:37:07.631243426 +0000 UTC m=+826.354341701" Feb 01 07:37:08 crc kubenswrapper[4650]: I0201 07:37:08.039261 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-48qdx" Feb 01 07:37:08 crc kubenswrapper[4650]: I0201 07:37:08.151482 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h6bk2\" (UniqueName: \"kubernetes.io/projected/282547c2-b668-4534-9550-1e0ac2a064b9-kube-api-access-h6bk2\") pod \"282547c2-b668-4534-9550-1e0ac2a064b9\" (UID: \"282547c2-b668-4534-9550-1e0ac2a064b9\") " Feb 01 07:37:08 crc kubenswrapper[4650]: I0201 07:37:08.159633 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/282547c2-b668-4534-9550-1e0ac2a064b9-kube-api-access-h6bk2" (OuterVolumeSpecName: "kube-api-access-h6bk2") pod "282547c2-b668-4534-9550-1e0ac2a064b9" (UID: "282547c2-b668-4534-9550-1e0ac2a064b9"). InnerVolumeSpecName "kube-api-access-h6bk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:37:08 crc kubenswrapper[4650]: I0201 07:37:08.253682 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h6bk2\" (UniqueName: \"kubernetes.io/projected/282547c2-b668-4534-9550-1e0ac2a064b9-kube-api-access-h6bk2\") on node \"crc\" DevicePath \"\"" Feb 01 07:37:08 crc kubenswrapper[4650]: I0201 07:37:08.602846 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-48qdx" Feb 01 07:37:08 crc kubenswrapper[4650]: I0201 07:37:08.602966 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-48qdx" event={"ID":"282547c2-b668-4534-9550-1e0ac2a064b9","Type":"ContainerDied","Data":"dec5a655215381373795b3fb79e5fa74f196974f3937d009c767eeb23d632970"} Feb 01 07:37:08 crc kubenswrapper[4650]: I0201 07:37:08.603503 4650 scope.go:117] "RemoveContainer" containerID="dec5a655215381373795b3fb79e5fa74f196974f3937d009c767eeb23d632970" Feb 01 07:37:08 crc kubenswrapper[4650]: I0201 07:37:08.602741 4650 generic.go:334] "Generic (PLEG): container finished" podID="282547c2-b668-4534-9550-1e0ac2a064b9" containerID="dec5a655215381373795b3fb79e5fa74f196974f3937d009c767eeb23d632970" exitCode=0 Feb 01 07:37:08 crc kubenswrapper[4650]: I0201 07:37:08.608897 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-48qdx" event={"ID":"282547c2-b668-4534-9550-1e0ac2a064b9","Type":"ContainerDied","Data":"f546fbb49188579cd42324ce904fbe939890d07d8b325adb9d95c668355b3d96"} Feb 01 07:37:08 crc kubenswrapper[4650]: I0201 07:37:08.629496 4650 scope.go:117] "RemoveContainer" containerID="dec5a655215381373795b3fb79e5fa74f196974f3937d009c767eeb23d632970" Feb 01 07:37:08 crc kubenswrapper[4650]: E0201 07:37:08.630413 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dec5a655215381373795b3fb79e5fa74f196974f3937d009c767eeb23d632970\": container with ID starting with dec5a655215381373795b3fb79e5fa74f196974f3937d009c767eeb23d632970 not found: ID does not exist" containerID="dec5a655215381373795b3fb79e5fa74f196974f3937d009c767eeb23d632970" Feb 01 07:37:08 crc kubenswrapper[4650]: I0201 07:37:08.630469 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dec5a655215381373795b3fb79e5fa74f196974f3937d009c767eeb23d632970"} err="failed to get container status \"dec5a655215381373795b3fb79e5fa74f196974f3937d009c767eeb23d632970\": rpc error: code = NotFound desc = could not find container \"dec5a655215381373795b3fb79e5fa74f196974f3937d009c767eeb23d632970\": container with ID starting with dec5a655215381373795b3fb79e5fa74f196974f3937d009c767eeb23d632970 not found: ID does not exist" Feb 01 07:37:08 crc kubenswrapper[4650]: I0201 07:37:08.653827 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-48qdx"] Feb 01 07:37:08 crc kubenswrapper[4650]: I0201 07:37:08.662118 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-48qdx"] Feb 01 07:37:09 crc kubenswrapper[4650]: I0201 07:37:09.823601 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-dk5xx" Feb 01 07:37:09 crc kubenswrapper[4650]: I0201 07:37:09.978116 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="282547c2-b668-4534-9550-1e0ac2a064b9" path="/var/lib/kubelet/pods/282547c2-b668-4534-9550-1e0ac2a064b9/volumes" Feb 01 07:37:16 crc kubenswrapper[4650]: I0201 07:37:16.749514 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-wj6vn" Feb 01 07:37:16 crc kubenswrapper[4650]: I0201 07:37:16.773851 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack-operators/openstack-operator-index-wj6vn" Feb 01 07:37:16 crc kubenswrapper[4650]: I0201 07:37:16.807879 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-wj6vn" Feb 01 07:37:17 crc kubenswrapper[4650]: I0201 07:37:17.720220 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-wj6vn" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.036477 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd"] Feb 01 07:37:30 crc kubenswrapper[4650]: E0201 07:37:30.037377 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="282547c2-b668-4534-9550-1e0ac2a064b9" containerName="registry-server" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.037397 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="282547c2-b668-4534-9550-1e0ac2a064b9" containerName="registry-server" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.037630 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="282547c2-b668-4534-9550-1e0ac2a064b9" containerName="registry-server" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.039013 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.047351 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-c5ltw" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.048993 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd"] Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.213355 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nw7t\" (UniqueName: \"kubernetes.io/projected/51767901-713f-439b-88a1-ec136fbf0efc-kube-api-access-4nw7t\") pod \"39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd\" (UID: \"51767901-713f-439b-88a1-ec136fbf0efc\") " pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.213471 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51767901-713f-439b-88a1-ec136fbf0efc-bundle\") pod \"39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd\" (UID: \"51767901-713f-439b-88a1-ec136fbf0efc\") " pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.213567 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51767901-713f-439b-88a1-ec136fbf0efc-util\") pod \"39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd\" (UID: \"51767901-713f-439b-88a1-ec136fbf0efc\") " pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.315107 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nw7t\" (UniqueName: 
\"kubernetes.io/projected/51767901-713f-439b-88a1-ec136fbf0efc-kube-api-access-4nw7t\") pod \"39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd\" (UID: \"51767901-713f-439b-88a1-ec136fbf0efc\") " pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.315226 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51767901-713f-439b-88a1-ec136fbf0efc-bundle\") pod \"39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd\" (UID: \"51767901-713f-439b-88a1-ec136fbf0efc\") " pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.315322 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51767901-713f-439b-88a1-ec136fbf0efc-util\") pod \"39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd\" (UID: \"51767901-713f-439b-88a1-ec136fbf0efc\") " pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.316319 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51767901-713f-439b-88a1-ec136fbf0efc-bundle\") pod \"39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd\" (UID: \"51767901-713f-439b-88a1-ec136fbf0efc\") " pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.316400 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51767901-713f-439b-88a1-ec136fbf0efc-util\") pod \"39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd\" (UID: \"51767901-713f-439b-88a1-ec136fbf0efc\") " pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.352578 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nw7t\" (UniqueName: \"kubernetes.io/projected/51767901-713f-439b-88a1-ec136fbf0efc-kube-api-access-4nw7t\") pod \"39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd\" (UID: \"51767901-713f-439b-88a1-ec136fbf0efc\") " pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.358258 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.597622 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd"] Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.795929 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" event={"ID":"51767901-713f-439b-88a1-ec136fbf0efc","Type":"ContainerStarted","Data":"b664096dbcab099dc594292e0f48a67e7c850b8f22a685ae21f2e18b40a37c7e"} Feb 01 07:37:30 crc kubenswrapper[4650]: I0201 07:37:30.796595 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" event={"ID":"51767901-713f-439b-88a1-ec136fbf0efc","Type":"ContainerStarted","Data":"8ccb83b3c23e3add5fe8d80f5206a2ec7cd75291dd7c4ed2b4622ed5f611115f"} Feb 01 07:37:31 crc kubenswrapper[4650]: I0201 07:37:31.804363 4650 generic.go:334] "Generic (PLEG): container finished" podID="51767901-713f-439b-88a1-ec136fbf0efc" containerID="b664096dbcab099dc594292e0f48a67e7c850b8f22a685ae21f2e18b40a37c7e" exitCode=0 Feb 01 07:37:31 crc kubenswrapper[4650]: I0201 07:37:31.804430 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" event={"ID":"51767901-713f-439b-88a1-ec136fbf0efc","Type":"ContainerDied","Data":"b664096dbcab099dc594292e0f48a67e7c850b8f22a685ae21f2e18b40a37c7e"} Feb 01 07:37:32 crc kubenswrapper[4650]: I0201 07:37:32.814395 4650 generic.go:334] "Generic (PLEG): container finished" podID="51767901-713f-439b-88a1-ec136fbf0efc" containerID="0d440f93633e830020102e818e99c89170276dc1d6a1507eb9ec7cb002e8e0b4" exitCode=0 Feb 01 07:37:32 crc kubenswrapper[4650]: I0201 07:37:32.814461 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" event={"ID":"51767901-713f-439b-88a1-ec136fbf0efc","Type":"ContainerDied","Data":"0d440f93633e830020102e818e99c89170276dc1d6a1507eb9ec7cb002e8e0b4"} Feb 01 07:37:33 crc kubenswrapper[4650]: I0201 07:37:33.823679 4650 generic.go:334] "Generic (PLEG): container finished" podID="51767901-713f-439b-88a1-ec136fbf0efc" containerID="985e778d4566eeb4cbceb6d6c0338826ba8d67b843efdcf4b6a11be6e9500b53" exitCode=0 Feb 01 07:37:33 crc kubenswrapper[4650]: I0201 07:37:33.823759 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" event={"ID":"51767901-713f-439b-88a1-ec136fbf0efc","Type":"ContainerDied","Data":"985e778d4566eeb4cbceb6d6c0338826ba8d67b843efdcf4b6a11be6e9500b53"} Feb 01 07:37:35 crc kubenswrapper[4650]: I0201 07:37:35.182471 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" Feb 01 07:37:35 crc kubenswrapper[4650]: I0201 07:37:35.291416 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51767901-713f-439b-88a1-ec136fbf0efc-util\") pod \"51767901-713f-439b-88a1-ec136fbf0efc\" (UID: \"51767901-713f-439b-88a1-ec136fbf0efc\") " Feb 01 07:37:35 crc kubenswrapper[4650]: I0201 07:37:35.291568 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4nw7t\" (UniqueName: \"kubernetes.io/projected/51767901-713f-439b-88a1-ec136fbf0efc-kube-api-access-4nw7t\") pod \"51767901-713f-439b-88a1-ec136fbf0efc\" (UID: \"51767901-713f-439b-88a1-ec136fbf0efc\") " Feb 01 07:37:35 crc kubenswrapper[4650]: I0201 07:37:35.291612 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51767901-713f-439b-88a1-ec136fbf0efc-bundle\") pod \"51767901-713f-439b-88a1-ec136fbf0efc\" (UID: \"51767901-713f-439b-88a1-ec136fbf0efc\") " Feb 01 07:37:35 crc kubenswrapper[4650]: I0201 07:37:35.292594 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51767901-713f-439b-88a1-ec136fbf0efc-bundle" (OuterVolumeSpecName: "bundle") pod "51767901-713f-439b-88a1-ec136fbf0efc" (UID: "51767901-713f-439b-88a1-ec136fbf0efc"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:37:35 crc kubenswrapper[4650]: I0201 07:37:35.303388 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51767901-713f-439b-88a1-ec136fbf0efc-kube-api-access-4nw7t" (OuterVolumeSpecName: "kube-api-access-4nw7t") pod "51767901-713f-439b-88a1-ec136fbf0efc" (UID: "51767901-713f-439b-88a1-ec136fbf0efc"). InnerVolumeSpecName "kube-api-access-4nw7t". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:37:35 crc kubenswrapper[4650]: I0201 07:37:35.321477 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51767901-713f-439b-88a1-ec136fbf0efc-util" (OuterVolumeSpecName: "util") pod "51767901-713f-439b-88a1-ec136fbf0efc" (UID: "51767901-713f-439b-88a1-ec136fbf0efc"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:37:35 crc kubenswrapper[4650]: I0201 07:37:35.393489 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4nw7t\" (UniqueName: \"kubernetes.io/projected/51767901-713f-439b-88a1-ec136fbf0efc-kube-api-access-4nw7t\") on node \"crc\" DevicePath \"\"" Feb 01 07:37:35 crc kubenswrapper[4650]: I0201 07:37:35.393524 4650 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51767901-713f-439b-88a1-ec136fbf0efc-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:37:35 crc kubenswrapper[4650]: I0201 07:37:35.393534 4650 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51767901-713f-439b-88a1-ec136fbf0efc-util\") on node \"crc\" DevicePath \"\"" Feb 01 07:37:35 crc kubenswrapper[4650]: I0201 07:37:35.846076 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" event={"ID":"51767901-713f-439b-88a1-ec136fbf0efc","Type":"ContainerDied","Data":"8ccb83b3c23e3add5fe8d80f5206a2ec7cd75291dd7c4ed2b4622ed5f611115f"} Feb 01 07:37:35 crc kubenswrapper[4650]: I0201 07:37:35.846119 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ccb83b3c23e3add5fe8d80f5206a2ec7cd75291dd7c4ed2b4622ed5f611115f" Feb 01 07:37:35 crc kubenswrapper[4650]: I0201 07:37:35.846652 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd" Feb 01 07:37:38 crc kubenswrapper[4650]: I0201 07:37:38.662952 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-68b5494db6-rgvsk"] Feb 01 07:37:38 crc kubenswrapper[4650]: E0201 07:37:38.663443 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51767901-713f-439b-88a1-ec136fbf0efc" containerName="util" Feb 01 07:37:38 crc kubenswrapper[4650]: I0201 07:37:38.663456 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="51767901-713f-439b-88a1-ec136fbf0efc" containerName="util" Feb 01 07:37:38 crc kubenswrapper[4650]: E0201 07:37:38.663469 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51767901-713f-439b-88a1-ec136fbf0efc" containerName="extract" Feb 01 07:37:38 crc kubenswrapper[4650]: I0201 07:37:38.663474 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="51767901-713f-439b-88a1-ec136fbf0efc" containerName="extract" Feb 01 07:37:38 crc kubenswrapper[4650]: E0201 07:37:38.663490 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51767901-713f-439b-88a1-ec136fbf0efc" containerName="pull" Feb 01 07:37:38 crc kubenswrapper[4650]: I0201 07:37:38.663497 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="51767901-713f-439b-88a1-ec136fbf0efc" containerName="pull" Feb 01 07:37:38 crc kubenswrapper[4650]: I0201 07:37:38.663591 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="51767901-713f-439b-88a1-ec136fbf0efc" containerName="extract" Feb 01 07:37:38 crc kubenswrapper[4650]: I0201 07:37:38.663959 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-68b5494db6-rgvsk" Feb 01 07:37:38 crc kubenswrapper[4650]: I0201 07:37:38.671069 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-dj47r" Feb 01 07:37:38 crc kubenswrapper[4650]: I0201 07:37:38.689673 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-68b5494db6-rgvsk"] Feb 01 07:37:38 crc kubenswrapper[4650]: I0201 07:37:38.847823 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds958\" (UniqueName: \"kubernetes.io/projected/29fe6ea0-af9b-4d10-a048-f215ba0ae8f5-kube-api-access-ds958\") pod \"openstack-operator-controller-init-68b5494db6-rgvsk\" (UID: \"29fe6ea0-af9b-4d10-a048-f215ba0ae8f5\") " pod="openstack-operators/openstack-operator-controller-init-68b5494db6-rgvsk" Feb 01 07:37:38 crc kubenswrapper[4650]: I0201 07:37:38.949606 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds958\" (UniqueName: \"kubernetes.io/projected/29fe6ea0-af9b-4d10-a048-f215ba0ae8f5-kube-api-access-ds958\") pod \"openstack-operator-controller-init-68b5494db6-rgvsk\" (UID: \"29fe6ea0-af9b-4d10-a048-f215ba0ae8f5\") " pod="openstack-operators/openstack-operator-controller-init-68b5494db6-rgvsk" Feb 01 07:37:38 crc kubenswrapper[4650]: I0201 07:37:38.972527 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds958\" (UniqueName: \"kubernetes.io/projected/29fe6ea0-af9b-4d10-a048-f215ba0ae8f5-kube-api-access-ds958\") pod \"openstack-operator-controller-init-68b5494db6-rgvsk\" (UID: \"29fe6ea0-af9b-4d10-a048-f215ba0ae8f5\") " pod="openstack-operators/openstack-operator-controller-init-68b5494db6-rgvsk" Feb 01 07:37:39 crc kubenswrapper[4650]: I0201 07:37:39.024771 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-68b5494db6-rgvsk" Feb 01 07:37:39 crc kubenswrapper[4650]: I0201 07:37:39.545448 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-68b5494db6-rgvsk"] Feb 01 07:37:39 crc kubenswrapper[4650]: I0201 07:37:39.882862 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-68b5494db6-rgvsk" event={"ID":"29fe6ea0-af9b-4d10-a048-f215ba0ae8f5","Type":"ContainerStarted","Data":"5fb685521a1d18ec6b6cad573e3f1d40e263ad6bd060db391a0b896a89469801"} Feb 01 07:37:44 crc kubenswrapper[4650]: I0201 07:37:44.927471 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-68b5494db6-rgvsk" event={"ID":"29fe6ea0-af9b-4d10-a048-f215ba0ae8f5","Type":"ContainerStarted","Data":"7899b0cebbc36cd2c7c4bc0d74853574432d445f305049d508dafd634b5df9a7"} Feb 01 07:37:44 crc kubenswrapper[4650]: I0201 07:37:44.928124 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-68b5494db6-rgvsk" Feb 01 07:37:44 crc kubenswrapper[4650]: I0201 07:37:44.967832 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-68b5494db6-rgvsk" podStartSLOduration=2.360005614 podStartE2EDuration="6.967810221s" podCreationTimestamp="2026-02-01 07:37:38 +0000 UTC" firstStartedPulling="2026-02-01 07:37:39.541854727 +0000 UTC m=+858.264952972" lastFinishedPulling="2026-02-01 07:37:44.149659334 +0000 UTC m=+862.872757579" observedRunningTime="2026-02-01 07:37:44.96699106 +0000 UTC m=+863.690089345" watchObservedRunningTime="2026-02-01 07:37:44.967810221 +0000 UTC m=+863.690908476" Feb 01 07:37:49 crc kubenswrapper[4650]: I0201 07:37:49.031576 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-68b5494db6-rgvsk" Feb 01 07:38:07 crc kubenswrapper[4650]: I0201 07:38:07.161364 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:38:07 crc kubenswrapper[4650]: I0201 07:38:07.162127 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.337667 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.343417 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-c8x6z"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.344077 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-c8x6z" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.343617 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.346560 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-qcmq7" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.347195 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-zj6dd" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.354244 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.360059 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-c8x6z"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.373888 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-2rm5k"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.374848 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-2rm5k" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.387196 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-kr62k" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.392456 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-2rm5k"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.429233 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.430399 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.432178 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.433151 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.438894 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-b5m29" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.441094 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-rxm4p" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.452071 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.457432 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7rxm\" (UniqueName: \"kubernetes.io/projected/f093c345-aa69-48e5-989c-a1ff94898684-kube-api-access-r7rxm\") pod \"designate-operator-controller-manager-6d9697b7f4-2rm5k\" (UID: \"f093c345-aa69-48e5-989c-a1ff94898684\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-2rm5k" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.457494 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dhmg\" (UniqueName: \"kubernetes.io/projected/99856058-8981-4ea6-9621-b9908bfd3bc1-kube-api-access-4dhmg\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-c8x6z\" (UID: \"99856058-8981-4ea6-9621-b9908bfd3bc1\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-c8x6z" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.457540 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wf89g\" (UniqueName: \"kubernetes.io/projected/c7a90234-9c82-425f-81e6-6fc434196e89-kube-api-access-wf89g\") pod \"cinder-operator-controller-manager-8d874c8fc-9wdtd\" (UID: \"c7a90234-9c82-425f-81e6-6fc434196e89\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.477302 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.481248 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-w64pj"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.481960 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-w64pj" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.492749 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-7qjp6" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.504371 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-w64pj"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.519625 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.520963 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.530522 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-4v2ck" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.531193 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.551451 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.552390 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.567208 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch7zb\" (UniqueName: \"kubernetes.io/projected/315b2715-63dd-4a0c-8fd3-4fe29f443a76-kube-api-access-ch7zb\") pod \"horizon-operator-controller-manager-5fb775575f-w64pj\" (UID: \"315b2715-63dd-4a0c-8fd3-4fe29f443a76\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-w64pj" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.567377 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dhmg\" (UniqueName: \"kubernetes.io/projected/99856058-8981-4ea6-9621-b9908bfd3bc1-kube-api-access-4dhmg\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-c8x6z\" (UID: \"99856058-8981-4ea6-9621-b9908bfd3bc1\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-c8x6z" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.567524 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wf89g\" (UniqueName: \"kubernetes.io/projected/c7a90234-9c82-425f-81e6-6fc434196e89-kube-api-access-wf89g\") pod \"cinder-operator-controller-manager-8d874c8fc-9wdtd\" (UID: \"c7a90234-9c82-425f-81e6-6fc434196e89\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.567600 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-mfm9n" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.567651 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fh9lc\" (UniqueName: \"kubernetes.io/projected/9f334ccd-b794-456b-97f9-4a57cc8005b3-kube-api-access-fh9lc\") pod \"glance-operator-controller-manager-8886f4c47-zg5gd\" (UID: \"9f334ccd-b794-456b-97f9-4a57cc8005b3\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.567703 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgn2s\" (UniqueName: \"kubernetes.io/projected/31eb2e76-b750-4d61-ba29-39a830fae2e1-kube-api-access-kgn2s\") pod \"heat-operator-controller-manager-69d6db494d-8x4cp\" (UID: \"31eb2e76-b750-4d61-ba29-39a830fae2e1\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.567988 4650 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-r7rxm\" (UniqueName: \"kubernetes.io/projected/f093c345-aa69-48e5-989c-a1ff94898684-kube-api-access-r7rxm\") pod \"designate-operator-controller-manager-6d9697b7f4-2rm5k\" (UID: \"f093c345-aa69-48e5-989c-a1ff94898684\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-2rm5k" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.575180 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.602595 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.612417 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wf89g\" (UniqueName: \"kubernetes.io/projected/c7a90234-9c82-425f-81e6-6fc434196e89-kube-api-access-wf89g\") pod \"cinder-operator-controller-manager-8d874c8fc-9wdtd\" (UID: \"c7a90234-9c82-425f-81e6-6fc434196e89\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.619852 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7rxm\" (UniqueName: \"kubernetes.io/projected/f093c345-aa69-48e5-989c-a1ff94898684-kube-api-access-r7rxm\") pod \"designate-operator-controller-manager-6d9697b7f4-2rm5k\" (UID: \"f093c345-aa69-48e5-989c-a1ff94898684\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-2rm5k" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.622019 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.628181 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.656347 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-f2cbz" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.657169 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dhmg\" (UniqueName: \"kubernetes.io/projected/99856058-8981-4ea6-9621-b9908bfd3bc1-kube-api-access-4dhmg\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-c8x6z\" (UID: \"99856058-8981-4ea6-9621-b9908bfd3bc1\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-c8x6z" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.669691 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rwbn\" (UniqueName: \"kubernetes.io/projected/c5cb0a01-53a8-410b-bda0-75ae6f19164d-kube-api-access-8rwbn\") pod \"ironic-operator-controller-manager-5f4b8bd54d-74flx\" (UID: \"c5cb0a01-53a8-410b-bda0-75ae6f19164d\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.670002 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lw9mm\" (UniqueName: \"kubernetes.io/projected/d7e8f67a-3581-4df8-8903-7a9ac417a653-kube-api-access-lw9mm\") pod \"infra-operator-controller-manager-79955696d6-v2tmb\" (UID: \"d7e8f67a-3581-4df8-8903-7a9ac417a653\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.670241 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch7zb\" (UniqueName: \"kubernetes.io/projected/315b2715-63dd-4a0c-8fd3-4fe29f443a76-kube-api-access-ch7zb\") pod \"horizon-operator-controller-manager-5fb775575f-w64pj\" (UID: \"315b2715-63dd-4a0c-8fd3-4fe29f443a76\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-w64pj" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.670348 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert\") pod \"infra-operator-controller-manager-79955696d6-v2tmb\" (UID: \"d7e8f67a-3581-4df8-8903-7a9ac417a653\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.670432 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fh9lc\" (UniqueName: \"kubernetes.io/projected/9f334ccd-b794-456b-97f9-4a57cc8005b3-kube-api-access-fh9lc\") pod \"glance-operator-controller-manager-8886f4c47-zg5gd\" (UID: \"9f334ccd-b794-456b-97f9-4a57cc8005b3\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.670524 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgn2s\" (UniqueName: \"kubernetes.io/projected/31eb2e76-b750-4d61-ba29-39a830fae2e1-kube-api-access-kgn2s\") pod 
\"heat-operator-controller-manager-69d6db494d-8x4cp\" (UID: \"31eb2e76-b750-4d61-ba29-39a830fae2e1\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.682378 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.683452 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.687788 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-z5gzj" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.687958 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.692875 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.698572 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-2pr9r"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.699695 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2pr9r" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.705572 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-h768c"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.706629 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-h768c" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.706628 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.707376 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-dms4b" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.707548 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.708331 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.728212 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-c8x6z" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.740884 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fh9lc\" (UniqueName: \"kubernetes.io/projected/9f334ccd-b794-456b-97f9-4a57cc8005b3-kube-api-access-fh9lc\") pod \"glance-operator-controller-manager-8886f4c47-zg5gd\" (UID: \"9f334ccd-b794-456b-97f9-4a57cc8005b3\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.746504 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-nm69b" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.746683 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-57mc2" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.748626 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-2rm5k" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.749776 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch7zb\" (UniqueName: \"kubernetes.io/projected/315b2715-63dd-4a0c-8fd3-4fe29f443a76-kube-api-access-ch7zb\") pod \"horizon-operator-controller-manager-5fb775575f-w64pj\" (UID: \"315b2715-63dd-4a0c-8fd3-4fe29f443a76\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-w64pj" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.761643 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-2pr9r"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.770167 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgn2s\" (UniqueName: \"kubernetes.io/projected/31eb2e76-b750-4d61-ba29-39a830fae2e1-kube-api-access-kgn2s\") pod \"heat-operator-controller-manager-69d6db494d-8x4cp\" (UID: \"31eb2e76-b750-4d61-ba29-39a830fae2e1\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.770464 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.771234 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rwbn\" (UniqueName: \"kubernetes.io/projected/c5cb0a01-53a8-410b-bda0-75ae6f19164d-kube-api-access-8rwbn\") pod \"ironic-operator-controller-manager-5f4b8bd54d-74flx\" (UID: \"c5cb0a01-53a8-410b-bda0-75ae6f19164d\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.771267 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmxhs\" (UniqueName: \"kubernetes.io/projected/41ada20b-8926-463a-aeda-24a59143fd11-kube-api-access-qmxhs\") pod \"keystone-operator-controller-manager-84f48565d4-nxkp4\" (UID: \"41ada20b-8926-463a-aeda-24a59143fd11\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.771294 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj2b2\" (UniqueName: \"kubernetes.io/projected/e426a3b9-307e-43fb-b97b-07e1ca7070c0-kube-api-access-sj2b2\") pod \"neutron-operator-controller-manager-585dbc889-h768c\" (UID: \"e426a3b9-307e-43fb-b97b-07e1ca7070c0\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-h768c" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.771312 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmd9l\" (UniqueName: \"kubernetes.io/projected/373fce62-65bd-4986-bb76-3abd15205fe7-kube-api-access-kmd9l\") pod \"manila-operator-controller-manager-7dd968899f-z2hrc\" (UID: \"373fce62-65bd-4986-bb76-3abd15205fe7\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.771339 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lw9mm\" (UniqueName: \"kubernetes.io/projected/d7e8f67a-3581-4df8-8903-7a9ac417a653-kube-api-access-lw9mm\") pod \"infra-operator-controller-manager-79955696d6-v2tmb\" (UID: \"d7e8f67a-3581-4df8-8903-7a9ac417a653\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.771368 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hp6h\" (UniqueName: \"kubernetes.io/projected/e65810ee-6370-4e69-9d21-b6c74af493ae-kube-api-access-7hp6h\") pod \"mariadb-operator-controller-manager-67bf948998-2pr9r\" (UID: \"e65810ee-6370-4e69-9d21-b6c74af493ae\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2pr9r" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.771408 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert\") pod \"infra-operator-controller-manager-79955696d6-v2tmb\" (UID: \"d7e8f67a-3581-4df8-8903-7a9ac417a653\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.771433 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bwv9\" 
(UniqueName: \"kubernetes.io/projected/e82b6e7a-07b2-4ad3-a94e-70a7c398a401-kube-api-access-8bwv9\") pod \"nova-operator-controller-manager-55bff696bd-ktrgf\" (UID: \"e82b6e7a-07b2-4ad3-a94e-70a7c398a401\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf" Feb 01 07:38:25 crc kubenswrapper[4650]: E0201 07:38:25.771832 4650 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 01 07:38:25 crc kubenswrapper[4650]: E0201 07:38:25.771871 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert podName:d7e8f67a-3581-4df8-8903-7a9ac417a653 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:26.271858114 +0000 UTC m=+904.994956349 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert") pod "infra-operator-controller-manager-79955696d6-v2tmb" (UID: "d7e8f67a-3581-4df8-8903-7a9ac417a653") : secret "infra-operator-webhook-server-cert" not found Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.788461 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.806083 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-h768c"] Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.842657 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lw9mm\" (UniqueName: \"kubernetes.io/projected/d7e8f67a-3581-4df8-8903-7a9ac417a653-kube-api-access-lw9mm\") pod \"infra-operator-controller-manager-79955696d6-v2tmb\" (UID: \"d7e8f67a-3581-4df8-8903-7a9ac417a653\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.874139 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-w64pj" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.912932 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bwv9\" (UniqueName: \"kubernetes.io/projected/e82b6e7a-07b2-4ad3-a94e-70a7c398a401-kube-api-access-8bwv9\") pod \"nova-operator-controller-manager-55bff696bd-ktrgf\" (UID: \"e82b6e7a-07b2-4ad3-a94e-70a7c398a401\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.913122 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmxhs\" (UniqueName: \"kubernetes.io/projected/41ada20b-8926-463a-aeda-24a59143fd11-kube-api-access-qmxhs\") pod \"keystone-operator-controller-manager-84f48565d4-nxkp4\" (UID: \"41ada20b-8926-463a-aeda-24a59143fd11\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.913312 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sj2b2\" (UniqueName: \"kubernetes.io/projected/e426a3b9-307e-43fb-b97b-07e1ca7070c0-kube-api-access-sj2b2\") pod \"neutron-operator-controller-manager-585dbc889-h768c\" (UID: \"e426a3b9-307e-43fb-b97b-07e1ca7070c0\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-h768c" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.913442 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmd9l\" (UniqueName: \"kubernetes.io/projected/373fce62-65bd-4986-bb76-3abd15205fe7-kube-api-access-kmd9l\") pod \"manila-operator-controller-manager-7dd968899f-z2hrc\" (UID: \"373fce62-65bd-4986-bb76-3abd15205fe7\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.913642 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hp6h\" (UniqueName: \"kubernetes.io/projected/e65810ee-6370-4e69-9d21-b6c74af493ae-kube-api-access-7hp6h\") pod \"mariadb-operator-controller-manager-67bf948998-2pr9r\" (UID: \"e65810ee-6370-4e69-9d21-b6c74af493ae\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2pr9r" Feb 01 07:38:25 crc kubenswrapper[4650]: I0201 07:38:25.966165 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rwbn\" (UniqueName: \"kubernetes.io/projected/c5cb0a01-53a8-410b-bda0-75ae6f19164d-kube-api-access-8rwbn\") pod \"ironic-operator-controller-manager-5f4b8bd54d-74flx\" (UID: \"c5cb0a01-53a8-410b-bda0-75ae6f19164d\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.015009 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmxhs\" (UniqueName: \"kubernetes.io/projected/41ada20b-8926-463a-aeda-24a59143fd11-kube-api-access-qmxhs\") pod \"keystone-operator-controller-manager-84f48565d4-nxkp4\" (UID: \"41ada20b-8926-463a-aeda-24a59143fd11\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.015529 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hp6h\" (UniqueName: 
\"kubernetes.io/projected/e65810ee-6370-4e69-9d21-b6c74af493ae-kube-api-access-7hp6h\") pod \"mariadb-operator-controller-manager-67bf948998-2pr9r\" (UID: \"e65810ee-6370-4e69-9d21-b6c74af493ae\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2pr9r" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.016686 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sj2b2\" (UniqueName: \"kubernetes.io/projected/e426a3b9-307e-43fb-b97b-07e1ca7070c0-kube-api-access-sj2b2\") pod \"neutron-operator-controller-manager-585dbc889-h768c\" (UID: \"e426a3b9-307e-43fb-b97b-07e1ca7070c0\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-h768c" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.017230 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bwv9\" (UniqueName: \"kubernetes.io/projected/e82b6e7a-07b2-4ad3-a94e-70a7c398a401-kube-api-access-8bwv9\") pod \"nova-operator-controller-manager-55bff696bd-ktrgf\" (UID: \"e82b6e7a-07b2-4ad3-a94e-70a7c398a401\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.019212 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmd9l\" (UniqueName: \"kubernetes.io/projected/373fce62-65bd-4986-bb76-3abd15205fe7-kube-api-access-kmd9l\") pod \"manila-operator-controller-manager-7dd968899f-z2hrc\" (UID: \"373fce62-65bd-4986-bb76-3abd15205fe7\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.027715 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.028458 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.033278 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-ljw7d" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.039924 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.046762 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.059059 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.110607 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.112351 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.117538 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-8z2qv" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.127643 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.128801 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.129949 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hshww\" (UniqueName: \"kubernetes.io/projected/db16d8c1-27f1-4922-bfca-e8e605f2add0-kube-api-access-hshww\") pod \"octavia-operator-controller-manager-6687f8d877-jbbh5\" (UID: \"db16d8c1-27f1-4922-bfca-e8e605f2add0\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.132336 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.132501 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-28k6b" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.142482 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.142551 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.143672 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.150732 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.154879 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-657c8cbb9f-9qfrw"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.156089 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-657c8cbb9f-9qfrw" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.157087 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-62gtj" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.163352 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-chl4c" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.170566 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2pr9r" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.179571 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.192169 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-fph9h"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.195151 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-fph9h" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.200283 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.209966 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-4tmph" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.211747 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-h768c" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.223460 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-657c8cbb9f-9qfrw"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.233571 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-fph9h"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.234417 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-729tb\" (UniqueName: \"kubernetes.io/projected/d697c2ab-6e6d-47e6-88c6-588a21de82b5-kube-api-access-729tb\") pod \"ovn-operator-controller-manager-788c46999f-xq7tx\" (UID: \"d697c2ab-6e6d-47e6-88c6-588a21de82b5\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.234454 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxztz\" (UniqueName: \"kubernetes.io/projected/6073be66-09c1-4fd0-93d2-4e892ca290ff-kube-api-access-rxztz\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9\" (UID: \"6073be66-09c1-4fd0-93d2-4e892ca290ff\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.234526 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9\" (UID: \"6073be66-09c1-4fd0-93d2-4e892ca290ff\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.234548 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hshww\" (UniqueName: \"kubernetes.io/projected/db16d8c1-27f1-4922-bfca-e8e605f2add0-kube-api-access-hshww\") pod \"octavia-operator-controller-manager-6687f8d877-jbbh5\" (UID: 
\"db16d8c1-27f1-4922-bfca-e8e605f2add0\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.260062 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.261426 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.269807 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-x9rq4" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.279709 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.284336 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.294634 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.316144 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hshww\" (UniqueName: \"kubernetes.io/projected/db16d8c1-27f1-4922-bfca-e8e605f2add0-kube-api-access-hshww\") pod \"octavia-operator-controller-manager-6687f8d877-jbbh5\" (UID: \"db16d8c1-27f1-4922-bfca-e8e605f2add0\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.323095 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-rcncq"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.324421 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-564965969-rcncq" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.330449 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-9n28k" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.336670 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9\" (UID: \"6073be66-09c1-4fd0-93d2-4e892ca290ff\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.336736 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-729tb\" (UniqueName: \"kubernetes.io/projected/d697c2ab-6e6d-47e6-88c6-588a21de82b5-kube-api-access-729tb\") pod \"ovn-operator-controller-manager-788c46999f-xq7tx\" (UID: \"d697c2ab-6e6d-47e6-88c6-588a21de82b5\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.336776 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxztz\" (UniqueName: \"kubernetes.io/projected/6073be66-09c1-4fd0-93d2-4e892ca290ff-kube-api-access-rxztz\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9\" (UID: \"6073be66-09c1-4fd0-93d2-4e892ca290ff\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.336799 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4qms\" (UniqueName: \"kubernetes.io/projected/d4c46bd6-4a47-4053-a165-5708ea7cd554-kube-api-access-j4qms\") pod \"placement-operator-controller-manager-5b964cf4cd-jsk77\" (UID: \"d4c46bd6-4a47-4053-a165-5708ea7cd554\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.336833 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwj2h\" (UniqueName: \"kubernetes.io/projected/4a2041b4-734b-488d-888b-8ee2ca3ecc16-kube-api-access-hwj2h\") pod \"swift-operator-controller-manager-657c8cbb9f-9qfrw\" (UID: \"4a2041b4-734b-488d-888b-8ee2ca3ecc16\") " pod="openstack-operators/swift-operator-controller-manager-657c8cbb9f-9qfrw" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.336867 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert\") pod \"infra-operator-controller-manager-79955696d6-v2tmb\" (UID: \"d7e8f67a-3581-4df8-8903-7a9ac417a653\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.336893 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bb67f\" (UniqueName: \"kubernetes.io/projected/ef904e35-a87d-44e7-ad35-eddc15e4e6cb-kube-api-access-bb67f\") pod \"telemetry-operator-controller-manager-64b5b76f97-fph9h\" (UID: \"ef904e35-a87d-44e7-ad35-eddc15e4e6cb\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-fph9h" 
Feb 01 07:38:26 crc kubenswrapper[4650]: E0201 07:38:26.337049 4650 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 01 07:38:26 crc kubenswrapper[4650]: E0201 07:38:26.337095 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert podName:6073be66-09c1-4fd0-93d2-4e892ca290ff nodeName:}" failed. No retries permitted until 2026-02-01 07:38:26.837077953 +0000 UTC m=+905.560176198 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" (UID: "6073be66-09c1-4fd0-93d2-4e892ca290ff") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 01 07:38:26 crc kubenswrapper[4650]: E0201 07:38:26.337345 4650 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 01 07:38:26 crc kubenswrapper[4650]: E0201 07:38:26.337380 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert podName:d7e8f67a-3581-4df8-8903-7a9ac417a653 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:27.33736484 +0000 UTC m=+906.060463085 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert") pod "infra-operator-controller-manager-79955696d6-v2tmb" (UID: "d7e8f67a-3581-4df8-8903-7a9ac417a653") : secret "infra-operator-webhook-server-cert" not found Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.342434 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-rcncq"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.366061 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.376937 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxztz\" (UniqueName: \"kubernetes.io/projected/6073be66-09c1-4fd0-93d2-4e892ca290ff-kube-api-access-rxztz\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9\" (UID: \"6073be66-09c1-4fd0-93d2-4e892ca290ff\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.388086 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-729tb\" (UniqueName: \"kubernetes.io/projected/d697c2ab-6e6d-47e6-88c6-588a21de82b5-kube-api-access-729tb\") pod \"ovn-operator-controller-manager-788c46999f-xq7tx\" (UID: \"d697c2ab-6e6d-47e6-88c6-588a21de82b5\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.420549 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.421812 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.424297 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.424510 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.424726 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-ns9gk" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.437880 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfwxh\" (UniqueName: \"kubernetes.io/projected/d7dfbfb2-7a85-4322-bae0-f6e559687cda-kube-api-access-sfwxh\") pod \"watcher-operator-controller-manager-564965969-rcncq\" (UID: \"d7dfbfb2-7a85-4322-bae0-f6e559687cda\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-rcncq" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.437943 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4ng6\" (UniqueName: \"kubernetes.io/projected/5b185c7e-2dd4-47a5-aa03-87998587cfa4-kube-api-access-x4ng6\") pod \"test-operator-controller-manager-56f8bfcd9f-mgsq9\" (UID: \"5b185c7e-2dd4-47a5-aa03-87998587cfa4\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.438000 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4qms\" (UniqueName: \"kubernetes.io/projected/d4c46bd6-4a47-4053-a165-5708ea7cd554-kube-api-access-j4qms\") pod \"placement-operator-controller-manager-5b964cf4cd-jsk77\" (UID: \"d4c46bd6-4a47-4053-a165-5708ea7cd554\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.438086 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwj2h\" (UniqueName: \"kubernetes.io/projected/4a2041b4-734b-488d-888b-8ee2ca3ecc16-kube-api-access-hwj2h\") pod \"swift-operator-controller-manager-657c8cbb9f-9qfrw\" (UID: \"4a2041b4-734b-488d-888b-8ee2ca3ecc16\") " pod="openstack-operators/swift-operator-controller-manager-657c8cbb9f-9qfrw" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.438137 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bb67f\" (UniqueName: \"kubernetes.io/projected/ef904e35-a87d-44e7-ad35-eddc15e4e6cb-kube-api-access-bb67f\") pod \"telemetry-operator-controller-manager-64b5b76f97-fph9h\" (UID: \"ef904e35-a87d-44e7-ad35-eddc15e4e6cb\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-fph9h" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.465476 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.483990 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.506051 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwj2h\" (UniqueName: \"kubernetes.io/projected/4a2041b4-734b-488d-888b-8ee2ca3ecc16-kube-api-access-hwj2h\") pod \"swift-operator-controller-manager-657c8cbb9f-9qfrw\" (UID: \"4a2041b4-734b-488d-888b-8ee2ca3ecc16\") " pod="openstack-operators/swift-operator-controller-manager-657c8cbb9f-9qfrw" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.513559 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bb67f\" (UniqueName: \"kubernetes.io/projected/ef904e35-a87d-44e7-ad35-eddc15e4e6cb-kube-api-access-bb67f\") pod \"telemetry-operator-controller-manager-64b5b76f97-fph9h\" (UID: \"ef904e35-a87d-44e7-ad35-eddc15e4e6cb\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-fph9h" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.518696 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4qms\" (UniqueName: \"kubernetes.io/projected/d4c46bd6-4a47-4053-a165-5708ea7cd554-kube-api-access-j4qms\") pod \"placement-operator-controller-manager-5b964cf4cd-jsk77\" (UID: \"d4c46bd6-4a47-4053-a165-5708ea7cd554\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.540408 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.541725 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.541762 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfwxh\" (UniqueName: \"kubernetes.io/projected/d7dfbfb2-7a85-4322-bae0-f6e559687cda-kube-api-access-sfwxh\") pod \"watcher-operator-controller-manager-564965969-rcncq\" (UID: \"d7dfbfb2-7a85-4322-bae0-f6e559687cda\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-rcncq" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.541813 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4ng6\" (UniqueName: \"kubernetes.io/projected/5b185c7e-2dd4-47a5-aa03-87998587cfa4-kube-api-access-x4ng6\") pod \"test-operator-controller-manager-56f8bfcd9f-mgsq9\" (UID: \"5b185c7e-2dd4-47a5-aa03-87998587cfa4\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 
07:38:26.541876 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgj8c\" (UniqueName: \"kubernetes.io/projected/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-kube-api-access-cgj8c\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.568307 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfwxh\" (UniqueName: \"kubernetes.io/projected/d7dfbfb2-7a85-4322-bae0-f6e559687cda-kube-api-access-sfwxh\") pod \"watcher-operator-controller-manager-564965969-rcncq\" (UID: \"d7dfbfb2-7a85-4322-bae0-f6e559687cda\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-rcncq" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.569322 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-564965969-rcncq" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.569665 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-c89v2"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.571367 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-c89v2" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.579761 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-ffpww" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.591355 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4ng6\" (UniqueName: \"kubernetes.io/projected/5b185c7e-2dd4-47a5-aa03-87998587cfa4-kube-api-access-x4ng6\") pod \"test-operator-controller-manager-56f8bfcd9f-mgsq9\" (UID: \"5b185c7e-2dd4-47a5-aa03-87998587cfa4\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.613069 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-c89v2"] Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.651136 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.651212 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgj8c\" (UniqueName: \"kubernetes.io/projected/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-kube-api-access-cgj8c\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.651264 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs\") pod 
\"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:26 crc kubenswrapper[4650]: E0201 07:38:26.651526 4650 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 01 07:38:26 crc kubenswrapper[4650]: E0201 07:38:26.651575 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs podName:6e1c3cb8-1623-42c8-8b2d-c6bc73e57496 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:27.151560308 +0000 UTC m=+905.874658553 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs") pod "openstack-operator-controller-manager-67485c4bf6-7xmf2" (UID: "6e1c3cb8-1623-42c8-8b2d-c6bc73e57496") : secret "webhook-server-cert" not found Feb 01 07:38:26 crc kubenswrapper[4650]: E0201 07:38:26.652275 4650 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 01 07:38:26 crc kubenswrapper[4650]: E0201 07:38:26.652300 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs podName:6e1c3cb8-1623-42c8-8b2d-c6bc73e57496 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:27.152292028 +0000 UTC m=+905.875390263 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs") pod "openstack-operator-controller-manager-67485c4bf6-7xmf2" (UID: "6e1c3cb8-1623-42c8-8b2d-c6bc73e57496") : secret "metrics-server-cert" not found Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.690742 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.730334 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-657c8cbb9f-9qfrw" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.740626 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgj8c\" (UniqueName: \"kubernetes.io/projected/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-kube-api-access-cgj8c\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.752284 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-fph9h" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.761156 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grnfm\" (UniqueName: \"kubernetes.io/projected/c6a2ff37-375f-45b0-bcda-e88907fe869e-kube-api-access-grnfm\") pod \"rabbitmq-cluster-operator-manager-668c99d594-c89v2\" (UID: \"c6a2ff37-375f-45b0-bcda-e88907fe869e\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-c89v2" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.837644 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.865131 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grnfm\" (UniqueName: \"kubernetes.io/projected/c6a2ff37-375f-45b0-bcda-e88907fe869e-kube-api-access-grnfm\") pod \"rabbitmq-cluster-operator-manager-668c99d594-c89v2\" (UID: \"c6a2ff37-375f-45b0-bcda-e88907fe869e\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-c89v2" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.865181 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9\" (UID: \"6073be66-09c1-4fd0-93d2-4e892ca290ff\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:38:26 crc kubenswrapper[4650]: E0201 07:38:26.865306 4650 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 01 07:38:26 crc kubenswrapper[4650]: E0201 07:38:26.865363 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert podName:6073be66-09c1-4fd0-93d2-4e892ca290ff nodeName:}" failed. No retries permitted until 2026-02-01 07:38:27.865336575 +0000 UTC m=+906.588434820 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" (UID: "6073be66-09c1-4fd0-93d2-4e892ca290ff") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.942973 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grnfm\" (UniqueName: \"kubernetes.io/projected/c6a2ff37-375f-45b0-bcda-e88907fe869e-kube-api-access-grnfm\") pod \"rabbitmq-cluster-operator-manager-668c99d594-c89v2\" (UID: \"c6a2ff37-375f-45b0-bcda-e88907fe869e\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-c89v2" Feb 01 07:38:26 crc kubenswrapper[4650]: I0201 07:38:26.995146 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-c89v2" Feb 01 07:38:27 crc kubenswrapper[4650]: I0201 07:38:27.170237 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:27 crc kubenswrapper[4650]: I0201 07:38:27.170356 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:27 crc kubenswrapper[4650]: E0201 07:38:27.170512 4650 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 01 07:38:27 crc kubenswrapper[4650]: E0201 07:38:27.170570 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs podName:6e1c3cb8-1623-42c8-8b2d-c6bc73e57496 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:28.170556456 +0000 UTC m=+906.893654701 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs") pod "openstack-operator-controller-manager-67485c4bf6-7xmf2" (UID: "6e1c3cb8-1623-42c8-8b2d-c6bc73e57496") : secret "webhook-server-cert" not found Feb 01 07:38:27 crc kubenswrapper[4650]: E0201 07:38:27.170981 4650 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 01 07:38:27 crc kubenswrapper[4650]: E0201 07:38:27.171051 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs podName:6e1c3cb8-1623-42c8-8b2d-c6bc73e57496 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:28.171020138 +0000 UTC m=+906.894118383 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs") pod "openstack-operator-controller-manager-67485c4bf6-7xmf2" (UID: "6e1c3cb8-1623-42c8-8b2d-c6bc73e57496") : secret "metrics-server-cert" not found Feb 01 07:38:27 crc kubenswrapper[4650]: I0201 07:38:27.373398 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert\") pod \"infra-operator-controller-manager-79955696d6-v2tmb\" (UID: \"d7e8f67a-3581-4df8-8903-7a9ac417a653\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:38:27 crc kubenswrapper[4650]: E0201 07:38:27.373567 4650 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 01 07:38:27 crc kubenswrapper[4650]: E0201 07:38:27.373610 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert podName:d7e8f67a-3581-4df8-8903-7a9ac417a653 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:29.373596928 +0000 UTC m=+908.096695173 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert") pod "infra-operator-controller-manager-79955696d6-v2tmb" (UID: "d7e8f67a-3581-4df8-8903-7a9ac417a653") : secret "infra-operator-webhook-server-cert" not found Feb 01 07:38:27 crc kubenswrapper[4650]: I0201 07:38:27.836855 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-2rm5k"] Feb 01 07:38:27 crc kubenswrapper[4650]: I0201 07:38:27.853108 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd"] Feb 01 07:38:27 crc kubenswrapper[4650]: I0201 07:38:27.868020 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-c8x6z"] Feb 01 07:38:27 crc kubenswrapper[4650]: I0201 07:38:27.882377 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9\" (UID: \"6073be66-09c1-4fd0-93d2-4e892ca290ff\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:38:27 crc kubenswrapper[4650]: E0201 07:38:27.882505 4650 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 01 07:38:27 crc kubenswrapper[4650]: E0201 07:38:27.882540 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert podName:6073be66-09c1-4fd0-93d2-4e892ca290ff nodeName:}" failed. No retries permitted until 2026-02-01 07:38:29.882528159 +0000 UTC m=+908.605626394 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" (UID: "6073be66-09c1-4fd0-93d2-4e892ca290ff") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.193727 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.193802 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.193915 4650 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.193939 4650 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.193983 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs podName:6e1c3cb8-1623-42c8-8b2d-c6bc73e57496 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:30.193966454 +0000 UTC m=+908.917064699 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs") pod "openstack-operator-controller-manager-67485c4bf6-7xmf2" (UID: "6e1c3cb8-1623-42c8-8b2d-c6bc73e57496") : secret "webhook-server-cert" not found Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.194003 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs podName:6e1c3cb8-1623-42c8-8b2d-c6bc73e57496 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:30.193995615 +0000 UTC m=+908.917093850 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs") pod "openstack-operator-controller-manager-67485c4bf6-7xmf2" (UID: "6e1c3cb8-1623-42c8-8b2d-c6bc73e57496") : secret "metrics-server-cert" not found Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.248305 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-w64pj"] Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.280635 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-w64pj" event={"ID":"315b2715-63dd-4a0c-8fd3-4fe29f443a76","Type":"ContainerStarted","Data":"f629a53285f745421fc7710eaf065b832734682f5e08222565cbb87313c9b1a5"} Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.283152 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-2rm5k" event={"ID":"f093c345-aa69-48e5-989c-a1ff94898684","Type":"ContainerStarted","Data":"737be89a48bcca4ad399426477fe39829ea4db95869d98df15066d06b137b2a4"} Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.285789 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd" event={"ID":"c7a90234-9c82-425f-81e6-6fc434196e89","Type":"ContainerStarted","Data":"53a1c4dddbf7fe3d78270849643b9e6f89e098955ea58573b4dc0369134f70b4"} Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.289037 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-c8x6z" event={"ID":"99856058-8981-4ea6-9621-b9908bfd3bc1","Type":"ContainerStarted","Data":"e6a578d7bd02e7a222b1115ce9016f2d8dd351e84418d687726952ec63480a7c"} Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.416347 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp"] Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.430115 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd"] Feb 01 07:38:28 crc kubenswrapper[4650]: W0201 07:38:28.445201 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f334ccd_b794_456b_97f9_4a57cc8005b3.slice/crio-1a4a280cb43b4712a3baa9969adda408acfd291984d098cacf043f03fbcc1621 WatchSource:0}: Error finding container 1a4a280cb43b4712a3baa9969adda408acfd291984d098cacf043f03fbcc1621: Status 404 returned error can't find the container with id 1a4a280cb43b4712a3baa9969adda408acfd291984d098cacf043f03fbcc1621 Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.569994 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf"] Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.588746 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc"] Feb 01 07:38:28 crc kubenswrapper[4650]: W0201 07:38:28.600525 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod373fce62_65bd_4986_bb76_3abd15205fe7.slice/crio-1a1f0c08272950a969eefb50ab00ada657422692ed87d9bf4543573781eeef9d WatchSource:0}: Error finding 
container 1a1f0c08272950a969eefb50ab00ada657422692ed87d9bf4543573781eeef9d: Status 404 returned error can't find the container with id 1a1f0c08272950a969eefb50ab00ada657422692ed87d9bf4543573781eeef9d Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.726620 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5"] Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.737360 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-2pr9r"] Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.750819 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-c89v2"] Feb 01 07:38:28 crc kubenswrapper[4650]: W0201 07:38:28.757686 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb16d8c1_27f1_4922_bfca_e8e605f2add0.slice/crio-8dc0bb7c2807bd91bf7f6193657cad6c508403753c0745cf3f753ddc2af87f2b WatchSource:0}: Error finding container 8dc0bb7c2807bd91bf7f6193657cad6c508403753c0745cf3f753ddc2af87f2b: Status 404 returned error can't find the container with id 8dc0bb7c2807bd91bf7f6193657cad6c508403753c0745cf3f753ddc2af87f2b Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.763326 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx"] Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.772809 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx"] Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.778734 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-h768c"] Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.794535 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-657c8cbb9f-9qfrw"] Feb 01 07:38:28 crc kubenswrapper[4650]: W0201 07:38:28.794603 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode65810ee_6370_4e69_9d21_b6c74af493ae.slice/crio-80084bbf6ee155752ea58209528c31ece9f0d7bb42f365026885b96e50a0843a WatchSource:0}: Error finding container 80084bbf6ee155752ea58209528c31ece9f0d7bb42f365026885b96e50a0843a: Status 404 returned error can't find the container with id 80084bbf6ee155752ea58209528c31ece9f0d7bb42f365026885b96e50a0843a Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.828565 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-fph9h"] Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.853516 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77"] Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.893219 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9"] Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.895870 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-729tb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-788c46999f-xq7tx_openstack-operators(d697c2ab-6e6d-47e6-88c6-588a21de82b5): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.897114 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx" podUID="d697c2ab-6e6d-47e6-88c6-588a21de82b5" Feb 01 07:38:28 crc kubenswrapper[4650]: W0201 07:38:28.905797 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5b185c7e_2dd4_47a5_aa03_87998587cfa4.slice/crio-f753ac591f9ab49adb698ce0565e1044a78417ff4397f0d4b7955558ca3dc66d WatchSource:0}: Error finding container f753ac591f9ab49adb698ce0565e1044a78417ff4397f0d4b7955558ca3dc66d: Status 404 returned error can't find the container with id f753ac591f9ab49adb698ce0565e1044a78417ff4397f0d4b7955558ca3dc66d Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.936369 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4"] Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.937721 4650 
kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-x4ng6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-56f8bfcd9f-mgsq9_openstack-operators(5b185c7e-2dd4-47a5-aa03-87998587cfa4): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.939415 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9" podUID="5b185c7e-2dd4-47a5-aa03-87998587cfa4" Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.945101 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-j4qms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5b964cf4cd-jsk77_openstack-operators(d4c46bd6-4a47-4053-a165-5708ea7cd554): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 01 07:38:28 crc kubenswrapper[4650]: I0201 07:38:28.946594 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-rcncq"] Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.946606 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77" podUID="d4c46bd6-4a47-4053-a165-5708ea7cd554" Feb 01 07:38:28 crc kubenswrapper[4650]: W0201 07:38:28.950536 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd7dfbfb2_7a85_4322_bae0_f6e559687cda.slice/crio-68771e6d3786bd6882933c3d59a834a02f8ea84868931586209c9d0051582c80 WatchSource:0}: Error finding container 68771e6d3786bd6882933c3d59a834a02f8ea84868931586209c9d0051582c80: Status 404 returned error can't find the container with id 68771e6d3786bd6882933c3d59a834a02f8ea84868931586209c9d0051582c80 Feb 01 07:38:28 crc kubenswrapper[4650]: W0201 07:38:28.951366 4650 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41ada20b_8926_463a_aeda_24a59143fd11.slice/crio-ac99d9e195eed48f0cc0f6b1ff9d0428bf5a910226ce24f3f8c1116cc9dcfc00 WatchSource:0}: Error finding container ac99d9e195eed48f0cc0f6b1ff9d0428bf5a910226ce24f3f8c1116cc9dcfc00: Status 404 returned error can't find the container with id ac99d9e195eed48f0cc0f6b1ff9d0428bf5a910226ce24f3f8c1116cc9dcfc00 Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.955001 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:319c969e88f109b26487a9f5a67203682803d7386424703ab7ca0340be99ae17,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qmxhs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-84f48565d4-nxkp4_openstack-operators(41ada20b-8926-463a-aeda-24a59143fd11): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.956202 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4" podUID="41ada20b-8926-463a-aeda-24a59143fd11" Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.957797 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sfwxh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-564965969-rcncq_openstack-operators(d7dfbfb2-7a85-4322-bae0-f6e559687cda): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 01 07:38:28 crc kubenswrapper[4650]: E0201 07:38:28.959971 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-rcncq" podUID="d7dfbfb2-7a85-4322-bae0-f6e559687cda" Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.301555 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9" event={"ID":"5b185c7e-2dd4-47a5-aa03-87998587cfa4","Type":"ContainerStarted","Data":"f753ac591f9ab49adb698ce0565e1044a78417ff4397f0d4b7955558ca3dc66d"} Feb 01 07:38:29 crc kubenswrapper[4650]: E0201 07:38:29.302959 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241\\\"\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9" 
podUID="5b185c7e-2dd4-47a5-aa03-87998587cfa4" Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.303728 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-564965969-rcncq" event={"ID":"d7dfbfb2-7a85-4322-bae0-f6e559687cda","Type":"ContainerStarted","Data":"68771e6d3786bd6882933c3d59a834a02f8ea84868931586209c9d0051582c80"} Feb 01 07:38:29 crc kubenswrapper[4650]: E0201 07:38:29.306466 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-rcncq" podUID="d7dfbfb2-7a85-4322-bae0-f6e559687cda" Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.307231 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-c89v2" event={"ID":"c6a2ff37-375f-45b0-bcda-e88907fe869e","Type":"ContainerStarted","Data":"e9af3d05c937952da14f08e162bcbeaa6d259981e79a3bae1fc714bf834369c6"} Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.309307 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-657c8cbb9f-9qfrw" event={"ID":"4a2041b4-734b-488d-888b-8ee2ca3ecc16","Type":"ContainerStarted","Data":"264fac6316463c93b54cd6a6d869ad2195ef7e1d347179d6c9786860da41cfb0"} Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.310665 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-fph9h" event={"ID":"ef904e35-a87d-44e7-ad35-eddc15e4e6cb","Type":"ContainerStarted","Data":"ed6b7f1b65a398e988487e294aa0e8a33eab2ce109d2e8bf7846727e954c5ed0"} Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.311732 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-h768c" event={"ID":"e426a3b9-307e-43fb-b97b-07e1ca7070c0","Type":"ContainerStarted","Data":"aaf991d1a84bbb10919b0e03fef0e8884b839cabbf1d208982a64e0a33f2fe6d"} Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.312697 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc" event={"ID":"373fce62-65bd-4986-bb76-3abd15205fe7","Type":"ContainerStarted","Data":"1a1f0c08272950a969eefb50ab00ada657422692ed87d9bf4543573781eeef9d"} Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.317301 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4" event={"ID":"41ada20b-8926-463a-aeda-24a59143fd11","Type":"ContainerStarted","Data":"ac99d9e195eed48f0cc0f6b1ff9d0428bf5a910226ce24f3f8c1116cc9dcfc00"} Feb 01 07:38:29 crc kubenswrapper[4650]: E0201 07:38:29.318670 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:319c969e88f109b26487a9f5a67203682803d7386424703ab7ca0340be99ae17\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4" podUID="41ada20b-8926-463a-aeda-24a59143fd11" Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.320908 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx" event={"ID":"d697c2ab-6e6d-47e6-88c6-588a21de82b5","Type":"ContainerStarted","Data":"fecc131d8e439cc5e68073ca8159b9d4f58a053f477e4ce576d19a3a5cca0c58"} Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.331634 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2pr9r" event={"ID":"e65810ee-6370-4e69-9d21-b6c74af493ae","Type":"ContainerStarted","Data":"80084bbf6ee155752ea58209528c31ece9f0d7bb42f365026885b96e50a0843a"} Feb 01 07:38:29 crc kubenswrapper[4650]: E0201 07:38:29.337388 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx" podUID="d697c2ab-6e6d-47e6-88c6-588a21de82b5" Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.338608 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp" event={"ID":"31eb2e76-b750-4d61-ba29-39a830fae2e1","Type":"ContainerStarted","Data":"6f1bc5e5039b5b4292dbb06213f8d47f23952558f041b80d77ccc4c661562b61"} Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.343729 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5" event={"ID":"db16d8c1-27f1-4922-bfca-e8e605f2add0","Type":"ContainerStarted","Data":"8dc0bb7c2807bd91bf7f6193657cad6c508403753c0745cf3f753ddc2af87f2b"} Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.351088 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77" event={"ID":"d4c46bd6-4a47-4053-a165-5708ea7cd554","Type":"ContainerStarted","Data":"2fcc2bcc51859437ce289c5b24c9e0bc8070648abef4e3ffa8e867f4fd979d72"} Feb 01 07:38:29 crc kubenswrapper[4650]: E0201 07:38:29.354175 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77" podUID="d4c46bd6-4a47-4053-a165-5708ea7cd554" Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.360804 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf" event={"ID":"e82b6e7a-07b2-4ad3-a94e-70a7c398a401","Type":"ContainerStarted","Data":"87eb49e7c797cb0e0497c5bfb25dc59a4a2f821ef1c5f7f622d4e3fbf34e15d3"} Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.363865 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx" event={"ID":"c5cb0a01-53a8-410b-bda0-75ae6f19164d","Type":"ContainerStarted","Data":"e5a6f93ef9586b6d7df07c3a64d80ea285dba093a975f1d4b04fc0ccab68cd36"} Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.376445 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd" 
event={"ID":"9f334ccd-b794-456b-97f9-4a57cc8005b3","Type":"ContainerStarted","Data":"1a4a280cb43b4712a3baa9969adda408acfd291984d098cacf043f03fbcc1621"} Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.409232 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert\") pod \"infra-operator-controller-manager-79955696d6-v2tmb\" (UID: \"d7e8f67a-3581-4df8-8903-7a9ac417a653\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:38:29 crc kubenswrapper[4650]: E0201 07:38:29.410312 4650 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 01 07:38:29 crc kubenswrapper[4650]: E0201 07:38:29.410359 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert podName:d7e8f67a-3581-4df8-8903-7a9ac417a653 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:33.410344678 +0000 UTC m=+912.133442923 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert") pod "infra-operator-controller-manager-79955696d6-v2tmb" (UID: "d7e8f67a-3581-4df8-8903-7a9ac417a653") : secret "infra-operator-webhook-server-cert" not found Feb 01 07:38:29 crc kubenswrapper[4650]: I0201 07:38:29.921325 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9\" (UID: \"6073be66-09c1-4fd0-93d2-4e892ca290ff\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:38:29 crc kubenswrapper[4650]: E0201 07:38:29.921543 4650 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 01 07:38:29 crc kubenswrapper[4650]: E0201 07:38:29.921627 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert podName:6073be66-09c1-4fd0-93d2-4e892ca290ff nodeName:}" failed. No retries permitted until 2026-02-01 07:38:33.92160697 +0000 UTC m=+912.644705215 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" (UID: "6073be66-09c1-4fd0-93d2-4e892ca290ff") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 01 07:38:30 crc kubenswrapper[4650]: I0201 07:38:30.224697 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:30 crc kubenswrapper[4650]: I0201 07:38:30.224778 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:30 crc kubenswrapper[4650]: E0201 07:38:30.225019 4650 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 01 07:38:30 crc kubenswrapper[4650]: E0201 07:38:30.225088 4650 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 01 07:38:30 crc kubenswrapper[4650]: E0201 07:38:30.225125 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs podName:6e1c3cb8-1623-42c8-8b2d-c6bc73e57496 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:34.225099851 +0000 UTC m=+912.948198096 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs") pod "openstack-operator-controller-manager-67485c4bf6-7xmf2" (UID: "6e1c3cb8-1623-42c8-8b2d-c6bc73e57496") : secret "metrics-server-cert" not found Feb 01 07:38:30 crc kubenswrapper[4650]: E0201 07:38:30.225270 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs podName:6e1c3cb8-1623-42c8-8b2d-c6bc73e57496 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:34.225240545 +0000 UTC m=+912.948338790 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs") pod "openstack-operator-controller-manager-67485c4bf6-7xmf2" (UID: "6e1c3cb8-1623-42c8-8b2d-c6bc73e57496") : secret "webhook-server-cert" not found Feb 01 07:38:30 crc kubenswrapper[4650]: E0201 07:38:30.392358 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx" podUID="d697c2ab-6e6d-47e6-88c6-588a21de82b5" Feb 01 07:38:30 crc kubenswrapper[4650]: E0201 07:38:30.392403 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:319c969e88f109b26487a9f5a67203682803d7386424703ab7ca0340be99ae17\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4" podUID="41ada20b-8926-463a-aeda-24a59143fd11" Feb 01 07:38:30 crc kubenswrapper[4650]: E0201 07:38:30.392470 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77" podUID="d4c46bd6-4a47-4053-a165-5708ea7cd554" Feb 01 07:38:30 crc kubenswrapper[4650]: E0201 07:38:30.392504 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241\\\"\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9" podUID="5b185c7e-2dd4-47a5-aa03-87998587cfa4" Feb 01 07:38:30 crc kubenswrapper[4650]: E0201 07:38:30.392663 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-rcncq" podUID="d7dfbfb2-7a85-4322-bae0-f6e559687cda" Feb 01 07:38:33 crc kubenswrapper[4650]: I0201 07:38:33.497452 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert\") pod \"infra-operator-controller-manager-79955696d6-v2tmb\" (UID: \"d7e8f67a-3581-4df8-8903-7a9ac417a653\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:38:33 crc kubenswrapper[4650]: E0201 07:38:33.498181 4650 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 01 07:38:33 crc kubenswrapper[4650]: E0201 07:38:33.498234 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert podName:d7e8f67a-3581-4df8-8903-7a9ac417a653 nodeName:}" failed. 
No retries permitted until 2026-02-01 07:38:41.498218003 +0000 UTC m=+920.221316238 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert") pod "infra-operator-controller-manager-79955696d6-v2tmb" (UID: "d7e8f67a-3581-4df8-8903-7a9ac417a653") : secret "infra-operator-webhook-server-cert" not found Feb 01 07:38:34 crc kubenswrapper[4650]: I0201 07:38:34.005513 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9\" (UID: \"6073be66-09c1-4fd0-93d2-4e892ca290ff\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:38:34 crc kubenswrapper[4650]: E0201 07:38:34.005688 4650 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 01 07:38:34 crc kubenswrapper[4650]: E0201 07:38:34.005923 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert podName:6073be66-09c1-4fd0-93d2-4e892ca290ff nodeName:}" failed. No retries permitted until 2026-02-01 07:38:42.00589706 +0000 UTC m=+920.728995345 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" (UID: "6073be66-09c1-4fd0-93d2-4e892ca290ff") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 01 07:38:34 crc kubenswrapper[4650]: I0201 07:38:34.311017 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:34 crc kubenswrapper[4650]: I0201 07:38:34.311141 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:34 crc kubenswrapper[4650]: E0201 07:38:34.311257 4650 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 01 07:38:34 crc kubenswrapper[4650]: E0201 07:38:34.311302 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs podName:6e1c3cb8-1623-42c8-8b2d-c6bc73e57496 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:42.311287721 +0000 UTC m=+921.034385966 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs") pod "openstack-operator-controller-manager-67485c4bf6-7xmf2" (UID: "6e1c3cb8-1623-42c8-8b2d-c6bc73e57496") : secret "metrics-server-cert" not found Feb 01 07:38:34 crc kubenswrapper[4650]: E0201 07:38:34.311607 4650 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 01 07:38:34 crc kubenswrapper[4650]: E0201 07:38:34.311632 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs podName:6e1c3cb8-1623-42c8-8b2d-c6bc73e57496 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:42.31162597 +0000 UTC m=+921.034724205 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs") pod "openstack-operator-controller-manager-67485c4bf6-7xmf2" (UID: "6e1c3cb8-1623-42c8-8b2d-c6bc73e57496") : secret "webhook-server-cert" not found Feb 01 07:38:37 crc kubenswrapper[4650]: I0201 07:38:37.161632 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:38:37 crc kubenswrapper[4650]: I0201 07:38:37.162224 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:38:41 crc kubenswrapper[4650]: I0201 07:38:41.563481 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert\") pod \"infra-operator-controller-manager-79955696d6-v2tmb\" (UID: \"d7e8f67a-3581-4df8-8903-7a9ac417a653\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:38:41 crc kubenswrapper[4650]: I0201 07:38:41.571511 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d7e8f67a-3581-4df8-8903-7a9ac417a653-cert\") pod \"infra-operator-controller-manager-79955696d6-v2tmb\" (UID: \"d7e8f67a-3581-4df8-8903-7a9ac417a653\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:38:41 crc kubenswrapper[4650]: I0201 07:38:41.754794 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:38:42 crc kubenswrapper[4650]: I0201 07:38:42.071825 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9\" (UID: \"6073be66-09c1-4fd0-93d2-4e892ca290ff\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:38:42 crc kubenswrapper[4650]: I0201 07:38:42.075907 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6073be66-09c1-4fd0-93d2-4e892ca290ff-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9\" (UID: \"6073be66-09c1-4fd0-93d2-4e892ca290ff\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:38:42 crc kubenswrapper[4650]: I0201 07:38:42.099354 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:38:42 crc kubenswrapper[4650]: E0201 07:38:42.317727 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:27d83ada27cf70cda0c5738f97551d81f1ea4068e83a090f3312e22172d72e10" Feb 01 07:38:42 crc kubenswrapper[4650]: E0201 07:38:42.318011 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:27d83ada27cf70cda0c5738f97551d81f1ea4068e83a090f3312e22172d72e10,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kgn2s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-69d6db494d-8x4cp_openstack-operators(31eb2e76-b750-4d61-ba29-39a830fae2e1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:38:42 crc kubenswrapper[4650]: E0201 07:38:42.319583 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp" podUID="31eb2e76-b750-4d61-ba29-39a830fae2e1" Feb 01 07:38:42 crc kubenswrapper[4650]: I0201 07:38:42.377397 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:42 crc kubenswrapper[4650]: I0201 07:38:42.377629 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:42 crc kubenswrapper[4650]: E0201 07:38:42.377687 4650 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 01 07:38:42 crc kubenswrapper[4650]: E0201 07:38:42.377801 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs podName:6e1c3cb8-1623-42c8-8b2d-c6bc73e57496 nodeName:}" failed. No retries permitted until 2026-02-01 07:38:58.377770792 +0000 UTC m=+937.100869067 (durationBeforeRetry 16s). 
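
[Annotation] The webhook-certs mount above is still backing off (now 16s) because the webhook-server-cert secret has not appeared, whereas the infra-operator and openstack-baremetal-operator cert mounts succeeded at 07:38:41-07:38:42 once their secrets were created. In this deployment the certificates are normally provisioned by the operator tooling rather than by hand; purely to illustrate the kind of object the kubelet is waiting for, here is a hedged client-go sketch that creates a TLS secret with the expected name, using hypothetical certificate files.

package main

import (
	"context"
	"log"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical certificate material; normally produced by the operator's cert tooling.
	crt, err := os.ReadFile("tls.crt")
	if err != nil {
		log.Fatal(err)
	}
	key, err := os.ReadFile("tls.key")
	if err != nil {
		log.Fatal(err)
	}

	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "webhook-server-cert", // the name the kubelet is retrying against
			Namespace: "openstack-operators",
		},
		Type: corev1.SecretTypeTLS,
		Data: map[string][]byte{"tls.crt": crt, "tls.key": key},
	}
	if _, err := clientset.CoreV1().Secrets("openstack-operators").Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
		log.Fatal(err)
	}
	log.Println("secret created; the pending MountVolume retry should now succeed")
}
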
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs") pod "openstack-operator-controller-manager-67485c4bf6-7xmf2" (UID: "6e1c3cb8-1623-42c8-8b2d-c6bc73e57496") : secret "webhook-server-cert" not found Feb 01 07:38:42 crc kubenswrapper[4650]: I0201 07:38:42.392492 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-metrics-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:42 crc kubenswrapper[4650]: E0201 07:38:42.512011 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:27d83ada27cf70cda0c5738f97551d81f1ea4068e83a090f3312e22172d72e10\\\"\"" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp" podUID="31eb2e76-b750-4d61-ba29-39a830fae2e1" Feb 01 07:38:42 crc kubenswrapper[4650]: E0201 07:38:42.991988 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:1f593e8d49d02b6484c89632192ae54771675c54fbd8426e3675b8e20ecfd7c4" Feb 01 07:38:42 crc kubenswrapper[4650]: E0201 07:38:42.992535 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:1f593e8d49d02b6484c89632192ae54771675c54fbd8426e3675b8e20ecfd7c4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fh9lc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-8886f4c47-zg5gd_openstack-operators(9f334ccd-b794-456b-97f9-4a57cc8005b3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:38:42 crc kubenswrapper[4650]: E0201 07:38:42.994484 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd" podUID="9f334ccd-b794-456b-97f9-4a57cc8005b3" Feb 01 07:38:43 crc kubenswrapper[4650]: E0201 07:38:43.515262 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:1f593e8d49d02b6484c89632192ae54771675c54fbd8426e3675b8e20ecfd7c4\\\"\"" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd" podUID="9f334ccd-b794-456b-97f9-4a57cc8005b3" Feb 01 07:38:44 crc kubenswrapper[4650]: E0201 07:38:44.702153 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:bbb46b8b3b69fdfad7bafc10a7e88f6ea58bcdc3c91e30beb79e24417d52e0f6" Feb 01 07:38:44 crc kubenswrapper[4650]: E0201 07:38:44.702391 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:bbb46b8b3b69fdfad7bafc10a7e88f6ea58bcdc3c91e30beb79e24417d52e0f6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sj2b2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-585dbc889-h768c_openstack-operators(e426a3b9-307e-43fb-b97b-07e1ca7070c0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:38:44 crc kubenswrapper[4650]: E0201 07:38:44.703853 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-h768c" podUID="e426a3b9-307e-43fb-b97b-07e1ca7070c0" Feb 01 07:38:45 crc kubenswrapper[4650]: E0201 07:38:45.539257 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:bbb46b8b3b69fdfad7bafc10a7e88f6ea58bcdc3c91e30beb79e24417d52e0f6\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-h768c" podUID="e426a3b9-307e-43fb-b97b-07e1ca7070c0" Feb 01 07:38:45 crc kubenswrapper[4650]: E0201 07:38:45.615234 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:e6f2f361f1dcbb321407a5884951e16ff96e7b88942b10b548f27ad4de14a0be" Feb 01 07:38:45 crc kubenswrapper[4650]: E0201 07:38:45.615416 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:e6f2f361f1dcbb321407a5884951e16ff96e7b88942b10b548f27ad4de14a0be,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hshww,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-6687f8d877-jbbh5_openstack-operators(db16d8c1-27f1-4922-bfca-e8e605f2add0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:38:45 crc kubenswrapper[4650]: E0201 07:38:45.617625 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5" podUID="db16d8c1-27f1-4922-bfca-e8e605f2add0" Feb 01 07:38:46 crc kubenswrapper[4650]: E0201 07:38:46.546907 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:e6f2f361f1dcbb321407a5884951e16ff96e7b88942b10b548f27ad4de14a0be\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5" podUID="db16d8c1-27f1-4922-bfca-e8e605f2add0" Feb 01 07:38:46 crc kubenswrapper[4650]: E0201 07:38:46.924425 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:6e21a1dda86ba365817102d23a5d4d2d5dcd1c4d8e5f8d74bd24548aa8c63898" Feb 01 07:38:46 crc kubenswrapper[4650]: E0201 07:38:46.924707 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:6e21a1dda86ba365817102d23a5d4d2d5dcd1c4d8e5f8d74bd24548aa8c63898,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wf89g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-8d874c8fc-9wdtd_openstack-operators(c7a90234-9c82-425f-81e6-6fc434196e89): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:38:46 crc kubenswrapper[4650]: E0201 07:38:46.925910 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd" podUID="c7a90234-9c82-425f-81e6-6fc434196e89" Feb 01 07:38:47 crc kubenswrapper[4650]: E0201 07:38:47.554875 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:6e21a1dda86ba365817102d23a5d4d2d5dcd1c4d8e5f8d74bd24548aa8c63898\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd" podUID="c7a90234-9c82-425f-81e6-6fc434196e89" Feb 01 07:38:49 crc kubenswrapper[4650]: E0201 07:38:49.755493 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:cd911e8d7a7a1104d77691dbaaf54370015cbb82859337746db5a9186d5dc566" Feb 01 07:38:49 crc kubenswrapper[4650]: E0201 07:38:49.755792 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:cd911e8d7a7a1104d77691dbaaf54370015cbb82859337746db5a9186d5dc566,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kmd9l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-7dd968899f-z2hrc_openstack-operators(373fce62-65bd-4986-bb76-3abd15205fe7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:38:49 crc kubenswrapper[4650]: E0201 07:38:49.757053 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc" podUID="373fce62-65bd-4986-bb76-3abd15205fe7" Feb 01 07:38:50 crc kubenswrapper[4650]: I0201 07:38:50.128872 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-f7lrk"] Feb 01 07:38:50 crc kubenswrapper[4650]: I0201 07:38:50.132765 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:38:50 crc kubenswrapper[4650]: I0201 07:38:50.143478 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-f7lrk"] Feb 01 07:38:50 crc kubenswrapper[4650]: I0201 07:38:50.205388 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-catalog-content\") pod \"redhat-operators-f7lrk\" (UID: \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\") " pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:38:50 crc kubenswrapper[4650]: I0201 07:38:50.205434 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdp5j\" (UniqueName: \"kubernetes.io/projected/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-kube-api-access-rdp5j\") pod \"redhat-operators-f7lrk\" (UID: \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\") " pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:38:50 crc kubenswrapper[4650]: I0201 07:38:50.205460 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-utilities\") pod \"redhat-operators-f7lrk\" (UID: \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\") " pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:38:50 crc kubenswrapper[4650]: I0201 07:38:50.306701 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-catalog-content\") pod \"redhat-operators-f7lrk\" (UID: \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\") " pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:38:50 crc kubenswrapper[4650]: I0201 07:38:50.306744 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdp5j\" (UniqueName: \"kubernetes.io/projected/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-kube-api-access-rdp5j\") pod \"redhat-operators-f7lrk\" (UID: \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\") " pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:38:50 crc kubenswrapper[4650]: I0201 07:38:50.306769 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-utilities\") pod \"redhat-operators-f7lrk\" (UID: \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\") " pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:38:50 crc kubenswrapper[4650]: I0201 07:38:50.307247 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-utilities\") pod \"redhat-operators-f7lrk\" (UID: \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\") " pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:38:50 crc kubenswrapper[4650]: I0201 07:38:50.307447 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-catalog-content\") pod \"redhat-operators-f7lrk\" (UID: \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\") " pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:38:50 crc kubenswrapper[4650]: I0201 07:38:50.344840 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-rdp5j\" (UniqueName: \"kubernetes.io/projected/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-kube-api-access-rdp5j\") pod \"redhat-operators-f7lrk\" (UID: \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\") " pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:38:50 crc kubenswrapper[4650]: I0201 07:38:50.460455 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:38:50 crc kubenswrapper[4650]: E0201 07:38:50.572444 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:cd911e8d7a7a1104d77691dbaaf54370015cbb82859337746db5a9186d5dc566\\\"\"" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc" podUID="373fce62-65bd-4986-bb76-3abd15205fe7" Feb 01 07:38:50 crc kubenswrapper[4650]: E0201 07:38:50.600006 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:bead175f27e5f074f723694f3b66e5aa7238411bf8a27a267b9a2936e4465521" Feb 01 07:38:50 crc kubenswrapper[4650]: E0201 07:38:50.600407 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:bead175f27e5f074f723694f3b66e5aa7238411bf8a27a267b9a2936e4465521,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8rwbn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-5f4b8bd54d-74flx_openstack-operators(c5cb0a01-53a8-410b-bda0-75ae6f19164d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:38:50 crc kubenswrapper[4650]: E0201 07:38:50.601593 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx" podUID="c5cb0a01-53a8-410b-bda0-75ae6f19164d" Feb 01 07:38:51 crc kubenswrapper[4650]: E0201 07:38:51.577676 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:bead175f27e5f074f723694f3b66e5aa7238411bf8a27a267b9a2936e4465521\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx" podUID="c5cb0a01-53a8-410b-bda0-75ae6f19164d" Feb 01 07:38:53 crc kubenswrapper[4650]: E0201 07:38:53.301904 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Feb 01 07:38:53 crc kubenswrapper[4650]: E0201 07:38:53.302360 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-grnfm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-c89v2_openstack-operators(c6a2ff37-375f-45b0-bcda-e88907fe869e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:38:53 crc kubenswrapper[4650]: E0201 07:38:53.303693 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-c89v2" podUID="c6a2ff37-375f-45b0-bcda-e88907fe869e" Feb 01 07:38:53 crc kubenswrapper[4650]: E0201 07:38:53.594483 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-c89v2" podUID="c6a2ff37-375f-45b0-bcda-e88907fe869e" Feb 01 07:38:56 crc kubenswrapper[4650]: E0201 07:38:56.403230 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:5340b88039fac393da49ef4e181b2720c809c27a6bb30531a07a49342a1da45e" Feb 01 07:38:56 crc kubenswrapper[4650]: E0201 07:38:56.403728 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:5340b88039fac393da49ef4e181b2720c809c27a6bb30531a07a49342a1da45e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8bwv9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-55bff696bd-ktrgf_openstack-operators(e82b6e7a-07b2-4ad3-a94e-70a7c398a401): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:38:56 crc kubenswrapper[4650]: E0201 07:38:56.404922 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf" podUID="e82b6e7a-07b2-4ad3-a94e-70a7c398a401" Feb 01 07:38:56 crc kubenswrapper[4650]: E0201 07:38:56.612540 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:5340b88039fac393da49ef4e181b2720c809c27a6bb30531a07a49342a1da45e\\\"\"" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf" podUID="e82b6e7a-07b2-4ad3-a94e-70a7c398a401" Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.466831 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-f7lrk"] Feb 01 07:38:57 crc kubenswrapper[4650]: W0201 07:38:57.489624 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf248938c_a9ff_46b0_b4f9_a0b5cb5dd1f3.slice/crio-0911270cdbfd7c0337d4770f5ca97e237b99275dc2be9e64aecbc3f1faec66c6 WatchSource:0}: Error finding container 0911270cdbfd7c0337d4770f5ca97e237b99275dc2be9e64aecbc3f1faec66c6: Status 404 returned error can't find the container with id 0911270cdbfd7c0337d4770f5ca97e237b99275dc2be9e64aecbc3f1faec66c6 Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.596819 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb"] Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.628655 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-w64pj" event={"ID":"315b2715-63dd-4a0c-8fd3-4fe29f443a76","Type":"ContainerStarted","Data":"55965eacfee19d8f2b3a51d73d432a5f91a6d47b772c3de5e95f6a5f40a2292c"} Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.629600 4650 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-w64pj" Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.635288 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9"] Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.641815 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2pr9r" event={"ID":"e65810ee-6370-4e69-9d21-b6c74af493ae","Type":"ContainerStarted","Data":"5f436cf144e88e292a01880335cdecc463acd7b0dd2fc64d99b851645459a6f9"} Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.642539 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2pr9r" Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.661137 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f7lrk" event={"ID":"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3","Type":"ContainerStarted","Data":"0911270cdbfd7c0337d4770f5ca97e237b99275dc2be9e64aecbc3f1faec66c6"} Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.666078 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-fph9h" event={"ID":"ef904e35-a87d-44e7-ad35-eddc15e4e6cb","Type":"ContainerStarted","Data":"c1b6c685b8fdd2a3477b6ca30015731f9a06123302c7945a0433e78ccc1a6e37"} Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.666637 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-fph9h" Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.667671 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-c8x6z" event={"ID":"99856058-8981-4ea6-9621-b9908bfd3bc1","Type":"ContainerStarted","Data":"8cfe713219d9cb26ff84d4008873d188f2f588980d6a667a7e95ef55b5b9a8ca"} Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.668059 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-c8x6z" Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.669587 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-w64pj" podStartSLOduration=7.1010374 podStartE2EDuration="32.669576254s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.260619785 +0000 UTC m=+906.983718030" lastFinishedPulling="2026-02-01 07:38:53.829158629 +0000 UTC m=+932.552256884" observedRunningTime="2026-02-01 07:38:57.652166634 +0000 UTC m=+936.375264889" watchObservedRunningTime="2026-02-01 07:38:57.669576254 +0000 UTC m=+936.392674499" Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.704837 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-fph9h" podStartSLOduration=7.265428404 podStartE2EDuration="32.704820946s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.895129003 +0000 UTC m=+907.618227248" lastFinishedPulling="2026-02-01 07:38:54.334521545 +0000 UTC m=+933.057619790" observedRunningTime="2026-02-01 07:38:57.701823047 +0000 UTC m=+936.424921302" 
watchObservedRunningTime="2026-02-01 07:38:57.704820946 +0000 UTC m=+936.427919191" Feb 01 07:38:57 crc kubenswrapper[4650]: I0201 07:38:57.705122 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2pr9r" podStartSLOduration=7.18574763 podStartE2EDuration="32.705117544s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.814893914 +0000 UTC m=+907.537992159" lastFinishedPulling="2026-02-01 07:38:54.334263828 +0000 UTC m=+933.057362073" observedRunningTime="2026-02-01 07:38:57.669271186 +0000 UTC m=+936.392369431" watchObservedRunningTime="2026-02-01 07:38:57.705117544 +0000 UTC m=+936.428215789" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.431531 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.444345 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6e1c3cb8-1623-42c8-8b2d-c6bc73e57496-webhook-certs\") pod \"openstack-operator-controller-manager-67485c4bf6-7xmf2\" (UID: \"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496\") " pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.713288 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.714197 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-564965969-rcncq" event={"ID":"d7dfbfb2-7a85-4322-bae0-f6e559687cda","Type":"ContainerStarted","Data":"31809fe98f45e27e94d2b55f4a4029f92902232c9c3499cf35221bd7c9666cb7"} Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.714781 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-564965969-rcncq" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.721462 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77" event={"ID":"d4c46bd6-4a47-4053-a165-5708ea7cd554","Type":"ContainerStarted","Data":"8127aa85412a0195d95756558e798a96d58ff3623907ef59491f0200fe3e1a72"} Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.722232 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.727819 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-657c8cbb9f-9qfrw" event={"ID":"4a2041b4-734b-488d-888b-8ee2ca3ecc16","Type":"ContainerStarted","Data":"455c06e1fd6f756526609ec2ebca8f77bb18f01081693585a5b8b9ef3b7105fe"} Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.728325 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-657c8cbb9f-9qfrw" Feb 01 07:38:58 crc 
kubenswrapper[4650]: I0201 07:38:58.739405 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-h768c" event={"ID":"e426a3b9-307e-43fb-b97b-07e1ca7070c0","Type":"ContainerStarted","Data":"4cd97175481e18d3e2cd51cacc163fe9cc4280af8e2e107044997bb6af585466"} Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.739816 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-h768c" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.757930 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-2rm5k" event={"ID":"f093c345-aa69-48e5-989c-a1ff94898684","Type":"ContainerStarted","Data":"5a96b1a6bec7d4d64a730d9f444846d402d48a07ba6ebc50a7687cf7ac9375ef"} Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.758626 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-2rm5k" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.764951 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-c8x6z" podStartSLOduration=6.727016681 podStartE2EDuration="33.764937763s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:27.907232511 +0000 UTC m=+906.630330756" lastFinishedPulling="2026-02-01 07:38:54.945153593 +0000 UTC m=+933.668251838" observedRunningTime="2026-02-01 07:38:57.72845954 +0000 UTC m=+936.451557785" watchObservedRunningTime="2026-02-01 07:38:58.764937763 +0000 UTC m=+937.488035998" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.766874 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-564965969-rcncq" podStartSLOduration=4.675145115 podStartE2EDuration="32.766868624s" podCreationTimestamp="2026-02-01 07:38:26 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.957723566 +0000 UTC m=+907.680821801" lastFinishedPulling="2026-02-01 07:38:57.049447045 +0000 UTC m=+935.772545310" observedRunningTime="2026-02-01 07:38:58.763980108 +0000 UTC m=+937.487078353" watchObservedRunningTime="2026-02-01 07:38:58.766868624 +0000 UTC m=+937.489966869" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.778191 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp" event={"ID":"31eb2e76-b750-4d61-ba29-39a830fae2e1","Type":"ContainerStarted","Data":"c4088c7b15413b0af0805b612529b247c6600cc6520e144a05652afd132b1c64"} Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.778755 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.786909 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77" podStartSLOduration=5.682212571 podStartE2EDuration="33.786896753s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.944852056 +0000 UTC m=+907.667950301" lastFinishedPulling="2026-02-01 07:38:57.049536218 +0000 UTC m=+935.772634483" observedRunningTime="2026-02-01 07:38:58.785661511 +0000 UTC m=+937.508759756" 
watchObservedRunningTime="2026-02-01 07:38:58.786896753 +0000 UTC m=+937.509994988" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.793408 4650 generic.go:334] "Generic (PLEG): container finished" podID="f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" containerID="676d0469fea3b0596b2295be1259e49fd303afc1234f3e7bc0e19108691ec77b" exitCode=0 Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.793466 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f7lrk" event={"ID":"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3","Type":"ContainerDied","Data":"676d0469fea3b0596b2295be1259e49fd303afc1234f3e7bc0e19108691ec77b"} Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.801681 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9" event={"ID":"5b185c7e-2dd4-47a5-aa03-87998587cfa4","Type":"ContainerStarted","Data":"df9dae7f2ec4b8d3966cf3ec718995cb75475c67cf2580dfcc8ecdb8dc5ba049"} Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.802397 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.809925 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-2rm5k" podStartSLOduration=6.761326808 podStartE2EDuration="33.809907231s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:27.896336614 +0000 UTC m=+906.619434859" lastFinishedPulling="2026-02-01 07:38:54.944917037 +0000 UTC m=+933.668015282" observedRunningTime="2026-02-01 07:38:58.808673669 +0000 UTC m=+937.531771914" watchObservedRunningTime="2026-02-01 07:38:58.809907231 +0000 UTC m=+937.533005496" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.814156 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4" event={"ID":"41ada20b-8926-463a-aeda-24a59143fd11","Type":"ContainerStarted","Data":"2cdb1422ce50e0797f58233f4e4fe67af81cf65a13d4893c6fd9b34090249b98"} Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.814738 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.825063 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" event={"ID":"d7e8f67a-3581-4df8-8903-7a9ac417a653","Type":"ContainerStarted","Data":"6707c5a8c8f815f9dd563d6c5a0acb14efa2bb02d8625e5ab53eb79fed5e59a7"} Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.825396 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-h768c" podStartSLOduration=5.586952135 podStartE2EDuration="33.82538068s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.84731576 +0000 UTC m=+907.570414005" lastFinishedPulling="2026-02-01 07:38:57.085744295 +0000 UTC m=+935.808842550" observedRunningTime="2026-02-01 07:38:58.82501081 +0000 UTC m=+937.548109065" watchObservedRunningTime="2026-02-01 07:38:58.82538068 +0000 UTC m=+937.548478925" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.838281 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx" event={"ID":"d697c2ab-6e6d-47e6-88c6-588a21de82b5","Type":"ContainerStarted","Data":"609fe3e6a6b9f1b19985d9df59b7805020f766b0a9916acee9304cf17dbdf161"} Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.838968 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.845561 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" event={"ID":"6073be66-09c1-4fd0-93d2-4e892ca290ff","Type":"ContainerStarted","Data":"455ce15d048b1d168ed4342c3a4ea02cd6ac5b19f2a1bf7b32637494feab330a"} Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.912465 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9" podStartSLOduration=5.799132342 podStartE2EDuration="33.912450441s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.937521763 +0000 UTC m=+907.660620008" lastFinishedPulling="2026-02-01 07:38:57.050839842 +0000 UTC m=+935.773938107" observedRunningTime="2026-02-01 07:38:58.911476806 +0000 UTC m=+937.634575051" watchObservedRunningTime="2026-02-01 07:38:58.912450441 +0000 UTC m=+937.635548686" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.916948 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-657c8cbb9f-9qfrw" podStartSLOduration=6.996493567 podStartE2EDuration="33.91693025s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.827971179 +0000 UTC m=+907.551069424" lastFinishedPulling="2026-02-01 07:38:55.748407842 +0000 UTC m=+934.471506107" observedRunningTime="2026-02-01 07:38:58.862390458 +0000 UTC m=+937.585488703" watchObservedRunningTime="2026-02-01 07:38:58.91693025 +0000 UTC m=+937.640028495" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.979125 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4" podStartSLOduration=5.747722983 podStartE2EDuration="33.979107133s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.954860111 +0000 UTC m=+907.677958356" lastFinishedPulling="2026-02-01 07:38:57.186244271 +0000 UTC m=+935.909342506" observedRunningTime="2026-02-01 07:38:58.974321807 +0000 UTC m=+937.697420052" watchObservedRunningTime="2026-02-01 07:38:58.979107133 +0000 UTC m=+937.702205378" Feb 01 07:38:58 crc kubenswrapper[4650]: I0201 07:38:58.996973 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp" podStartSLOduration=5.3261961620000005 podStartE2EDuration="33.996956695s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.428362495 +0000 UTC m=+907.151460750" lastFinishedPulling="2026-02-01 07:38:57.099123028 +0000 UTC m=+935.822221283" observedRunningTime="2026-02-01 07:38:58.995937118 +0000 UTC m=+937.719035363" watchObservedRunningTime="2026-02-01 07:38:58.996956695 +0000 UTC m=+937.720054940" Feb 01 07:38:59 crc kubenswrapper[4650]: I0201 07:38:59.039285 4650 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx" podStartSLOduration=5.852569306 podStartE2EDuration="34.039259013s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.895684388 +0000 UTC m=+907.618782633" lastFinishedPulling="2026-02-01 07:38:57.082374075 +0000 UTC m=+935.805472340" observedRunningTime="2026-02-01 07:38:59.016500531 +0000 UTC m=+937.739598776" watchObservedRunningTime="2026-02-01 07:38:59.039259013 +0000 UTC m=+937.762357268" Feb 01 07:39:00 crc kubenswrapper[4650]: I0201 07:39:00.691630 4650 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 01 07:39:01 crc kubenswrapper[4650]: I0201 07:39:01.284926 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2"] Feb 01 07:39:01 crc kubenswrapper[4650]: I0201 07:39:01.884819 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" event={"ID":"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496","Type":"ContainerStarted","Data":"a3e4782e57b48f6eb99da03871b03e47a1a5f888f669a7511b310579596511ed"} Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.194300 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-p6dnj"] Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.195501 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.220173 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p6dnj"] Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.311909 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e914514-b71c-4747-8dfc-ae1eeef3c8a3-utilities\") pod \"community-operators-p6dnj\" (UID: \"6e914514-b71c-4747-8dfc-ae1eeef3c8a3\") " pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.311984 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e914514-b71c-4747-8dfc-ae1eeef3c8a3-catalog-content\") pod \"community-operators-p6dnj\" (UID: \"6e914514-b71c-4747-8dfc-ae1eeef3c8a3\") " pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.312060 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnbdf\" (UniqueName: \"kubernetes.io/projected/6e914514-b71c-4747-8dfc-ae1eeef3c8a3-kube-api-access-mnbdf\") pod \"community-operators-p6dnj\" (UID: \"6e914514-b71c-4747-8dfc-ae1eeef3c8a3\") " pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.413441 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e914514-b71c-4747-8dfc-ae1eeef3c8a3-catalog-content\") pod \"community-operators-p6dnj\" (UID: \"6e914514-b71c-4747-8dfc-ae1eeef3c8a3\") " pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.413520 4650 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-mnbdf\" (UniqueName: \"kubernetes.io/projected/6e914514-b71c-4747-8dfc-ae1eeef3c8a3-kube-api-access-mnbdf\") pod \"community-operators-p6dnj\" (UID: \"6e914514-b71c-4747-8dfc-ae1eeef3c8a3\") " pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.413559 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e914514-b71c-4747-8dfc-ae1eeef3c8a3-utilities\") pod \"community-operators-p6dnj\" (UID: \"6e914514-b71c-4747-8dfc-ae1eeef3c8a3\") " pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.414054 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e914514-b71c-4747-8dfc-ae1eeef3c8a3-catalog-content\") pod \"community-operators-p6dnj\" (UID: \"6e914514-b71c-4747-8dfc-ae1eeef3c8a3\") " pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.414099 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e914514-b71c-4747-8dfc-ae1eeef3c8a3-utilities\") pod \"community-operators-p6dnj\" (UID: \"6e914514-b71c-4747-8dfc-ae1eeef3c8a3\") " pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.438118 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnbdf\" (UniqueName: \"kubernetes.io/projected/6e914514-b71c-4747-8dfc-ae1eeef3c8a3-kube-api-access-mnbdf\") pod \"community-operators-p6dnj\" (UID: \"6e914514-b71c-4747-8dfc-ae1eeef3c8a3\") " pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.533407 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.893237 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" event={"ID":"6e1c3cb8-1623-42c8-8b2d-c6bc73e57496","Type":"ContainerStarted","Data":"0e72df035c7e0ba7a61d5c9cfa2d676e2067e2e5914c6c9a955d4eb1b5657f50"} Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.893436 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:39:02 crc kubenswrapper[4650]: I0201 07:39:02.921631 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" podStartSLOduration=36.921608836 podStartE2EDuration="36.921608836s" podCreationTimestamp="2026-02-01 07:38:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:39:02.917183659 +0000 UTC m=+941.640281914" watchObservedRunningTime="2026-02-01 07:39:02.921608836 +0000 UTC m=+941.644707081" Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.359354 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p6dnj"] Feb 01 07:39:05 crc kubenswrapper[4650]: W0201 07:39:05.374230 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e914514_b71c_4747_8dfc_ae1eeef3c8a3.slice/crio-02db4655a210ecb2c5deb5163ea81887ef4bcbc1c6ce6be64442da59fec558e2 WatchSource:0}: Error finding container 02db4655a210ecb2c5deb5163ea81887ef4bcbc1c6ce6be64442da59fec558e2: Status 404 returned error can't find the container with id 02db4655a210ecb2c5deb5163ea81887ef4bcbc1c6ce6be64442da59fec558e2 Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.732704 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-c8x6z" Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.763714 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-2rm5k" Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.773638 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-8x4cp" Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.882287 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-w64pj" Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.920162 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" event={"ID":"d7e8f67a-3581-4df8-8903-7a9ac417a653","Type":"ContainerStarted","Data":"09d6e707facfd55bc9f5cfd4388305a0e9d3c10f36c04075f313a264ab3aa73c"} Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.920294 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.922064 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd" event={"ID":"c7a90234-9c82-425f-81e6-6fc434196e89","Type":"ContainerStarted","Data":"0c9d5d8f594902bb6ab09d9e3ed65f748c33d7c157b467bb8cf6c9b65f84b6a6"} Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.922246 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd" Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.923498 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx" event={"ID":"c5cb0a01-53a8-410b-bda0-75ae6f19164d","Type":"ContainerStarted","Data":"2bcbcbfb77c41fe80ff0b36d0e974888d2fea00936686d66828b241c702284b7"} Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.923681 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx" Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.932859 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd" event={"ID":"9f334ccd-b794-456b-97f9-4a57cc8005b3","Type":"ContainerStarted","Data":"90ada9028c752cbfb267d6c6072b723896d08d034a272b49ec7e7503bca55faa"} Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.933100 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd" Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.935192 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f7lrk" event={"ID":"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3","Type":"ContainerStarted","Data":"97835c02b38737e5b59fe8627325a21cf9b6c523c853c34ddcf81e76351eec3c"} Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.937166 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" event={"ID":"6073be66-09c1-4fd0-93d2-4e892ca290ff","Type":"ContainerStarted","Data":"270f25fac54cadf86035183a2e8c1d802882be6eb87ec901b1cd1026751fc080"} Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.937289 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.938256 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p6dnj" event={"ID":"6e914514-b71c-4747-8dfc-ae1eeef3c8a3","Type":"ContainerStarted","Data":"02db4655a210ecb2c5deb5163ea81887ef4bcbc1c6ce6be64442da59fec558e2"} Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.939701 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5" event={"ID":"db16d8c1-27f1-4922-bfca-e8e605f2add0","Type":"ContainerStarted","Data":"a7bb39f9217616f6fabfc4be9b13be18905898fe42a692ece1480e2b7e2c153d"} Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.939959 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5" Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.963899 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx" 
podStartSLOduration=4.893044107 podStartE2EDuration="40.963880217s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.840803228 +0000 UTC m=+907.563901473" lastFinishedPulling="2026-02-01 07:39:04.911639338 +0000 UTC m=+943.634737583" observedRunningTime="2026-02-01 07:39:05.951798338 +0000 UTC m=+944.674896583" watchObservedRunningTime="2026-02-01 07:39:05.963880217 +0000 UTC m=+944.686978462" Feb 01 07:39:05 crc kubenswrapper[4650]: I0201 07:39:05.991701 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" podStartSLOduration=33.694538433 podStartE2EDuration="40.991686652s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:57.623008104 +0000 UTC m=+936.346106349" lastFinishedPulling="2026-02-01 07:39:04.920156293 +0000 UTC m=+943.643254568" observedRunningTime="2026-02-01 07:39:05.98857505 +0000 UTC m=+944.711673295" watchObservedRunningTime="2026-02-01 07:39:05.991686652 +0000 UTC m=+944.714784897" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.016705 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd" podStartSLOduration=3.964472213 podStartE2EDuration="41.016687423s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:27.87652079 +0000 UTC m=+906.599619035" lastFinishedPulling="2026-02-01 07:39:04.92873599 +0000 UTC m=+943.651834245" observedRunningTime="2026-02-01 07:39:06.013246092 +0000 UTC m=+944.736344357" watchObservedRunningTime="2026-02-01 07:39:06.016687423 +0000 UTC m=+944.739785668" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.043864 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5" podStartSLOduration=4.9073481359999995 podStartE2EDuration="41.043849831s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.762700135 +0000 UTC m=+907.485798370" lastFinishedPulling="2026-02-01 07:39:04.89920182 +0000 UTC m=+943.622300065" observedRunningTime="2026-02-01 07:39:06.040215345 +0000 UTC m=+944.763313590" watchObservedRunningTime="2026-02-01 07:39:06.043849831 +0000 UTC m=+944.766948076" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.065106 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd" podStartSLOduration=4.585810034 podStartE2EDuration="41.065089402s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.447529711 +0000 UTC m=+907.170627956" lastFinishedPulling="2026-02-01 07:39:04.926809069 +0000 UTC m=+943.649907324" observedRunningTime="2026-02-01 07:39:06.064220469 +0000 UTC m=+944.787318714" watchObservedRunningTime="2026-02-01 07:39:06.065089402 +0000 UTC m=+944.788187647" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.109637 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" podStartSLOduration=33.912217215 podStartE2EDuration="41.109620429s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:57.689574843 +0000 UTC m=+936.412673088" lastFinishedPulling="2026-02-01 07:39:04.886978057 +0000 
UTC m=+943.610076302" observedRunningTime="2026-02-01 07:39:06.10739248 +0000 UTC m=+944.830490725" watchObservedRunningTime="2026-02-01 07:39:06.109620429 +0000 UTC m=+944.832718674" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.175086 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2pr9r" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.222963 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-h768c" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.299723 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-nxkp4" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.493884 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-xq7tx" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.572504 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-564965969-rcncq" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.699560 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jsk77" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.755730 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-657c8cbb9f-9qfrw" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.795192 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-fph9h" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.855318 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-mgsq9" Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.972650 4650 generic.go:334] "Generic (PLEG): container finished" podID="6e914514-b71c-4747-8dfc-ae1eeef3c8a3" containerID="fa175fc1db30afaa4dc8b6d8be2a3e3dc411223cb247a9ab625d7da7d77e1e23" exitCode=0 Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.972725 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p6dnj" event={"ID":"6e914514-b71c-4747-8dfc-ae1eeef3c8a3","Type":"ContainerDied","Data":"fa175fc1db30afaa4dc8b6d8be2a3e3dc411223cb247a9ab625d7da7d77e1e23"} Feb 01 07:39:06 crc kubenswrapper[4650]: I0201 07:39:06.978088 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc" event={"ID":"373fce62-65bd-4986-bb76-3abd15205fe7","Type":"ContainerStarted","Data":"93df44318de495578c92c2291355fd2a9edb6c6c0e35f85fd26264a96177fcce"} Feb 01 07:39:07 crc kubenswrapper[4650]: I0201 07:39:07.160874 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:39:07 crc kubenswrapper[4650]: I0201 07:39:07.160938 4650 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:39:07 crc kubenswrapper[4650]: I0201 07:39:07.160987 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:39:07 crc kubenswrapper[4650]: I0201 07:39:07.161695 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a4559927c25b5172e0bb51589b156030237e4552bdad01ea0a510262dabc0be0"} pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 01 07:39:07 crc kubenswrapper[4650]: I0201 07:39:07.161762 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" containerID="cri-o://a4559927c25b5172e0bb51589b156030237e4552bdad01ea0a510262dabc0be0" gracePeriod=600 Feb 01 07:39:07 crc kubenswrapper[4650]: I0201 07:39:07.973490 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc" podStartSLOduration=5.230924571 podStartE2EDuration="42.973475367s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.607926438 +0000 UTC m=+907.331024683" lastFinishedPulling="2026-02-01 07:39:06.350477244 +0000 UTC m=+945.073575479" observedRunningTime="2026-02-01 07:39:07.043487308 +0000 UTC m=+945.766585553" watchObservedRunningTime="2026-02-01 07:39:07.973475367 +0000 UTC m=+946.696573612" Feb 01 07:39:07 crc kubenswrapper[4650]: I0201 07:39:07.981298 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8s7nw"] Feb 01 07:39:07 crc kubenswrapper[4650]: I0201 07:39:07.984634 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:07 crc kubenswrapper[4650]: I0201 07:39:07.991964 4650 generic.go:334] "Generic (PLEG): container finished" podID="f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" containerID="97835c02b38737e5b59fe8627325a21cf9b6c523c853c34ddcf81e76351eec3c" exitCode=0 Feb 01 07:39:07 crc kubenswrapper[4650]: I0201 07:39:07.992035 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f7lrk" event={"ID":"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3","Type":"ContainerDied","Data":"97835c02b38737e5b59fe8627325a21cf9b6c523c853c34ddcf81e76351eec3c"} Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.001853 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-c89v2" event={"ID":"c6a2ff37-375f-45b0-bcda-e88907fe869e","Type":"ContainerStarted","Data":"49c13310d8200199b0667cfff90854b808ab093aeb500b37402e404d71a0499d"} Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.009152 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8s7nw"] Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.069348 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-c89v2" podStartSLOduration=4.190267042 podStartE2EDuration="42.06933085s" podCreationTimestamp="2026-02-01 07:38:26 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.761337079 +0000 UTC m=+907.484435314" lastFinishedPulling="2026-02-01 07:39:06.640400877 +0000 UTC m=+945.363499122" observedRunningTime="2026-02-01 07:39:08.065191581 +0000 UTC m=+946.788289836" watchObservedRunningTime="2026-02-01 07:39:08.06933085 +0000 UTC m=+946.792429095" Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.109359 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vhtm\" (UniqueName: \"kubernetes.io/projected/40033fe9-18cb-4c60-8ff7-80309f3eb886-kube-api-access-7vhtm\") pod \"certified-operators-8s7nw\" (UID: \"40033fe9-18cb-4c60-8ff7-80309f3eb886\") " pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.109431 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40033fe9-18cb-4c60-8ff7-80309f3eb886-catalog-content\") pod \"certified-operators-8s7nw\" (UID: \"40033fe9-18cb-4c60-8ff7-80309f3eb886\") " pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.109459 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40033fe9-18cb-4c60-8ff7-80309f3eb886-utilities\") pod \"certified-operators-8s7nw\" (UID: \"40033fe9-18cb-4c60-8ff7-80309f3eb886\") " pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.210159 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40033fe9-18cb-4c60-8ff7-80309f3eb886-catalog-content\") pod \"certified-operators-8s7nw\" (UID: \"40033fe9-18cb-4c60-8ff7-80309f3eb886\") " pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.210206 4650 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40033fe9-18cb-4c60-8ff7-80309f3eb886-utilities\") pod \"certified-operators-8s7nw\" (UID: \"40033fe9-18cb-4c60-8ff7-80309f3eb886\") " pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.210280 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vhtm\" (UniqueName: \"kubernetes.io/projected/40033fe9-18cb-4c60-8ff7-80309f3eb886-kube-api-access-7vhtm\") pod \"certified-operators-8s7nw\" (UID: \"40033fe9-18cb-4c60-8ff7-80309f3eb886\") " pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.210858 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40033fe9-18cb-4c60-8ff7-80309f3eb886-catalog-content\") pod \"certified-operators-8s7nw\" (UID: \"40033fe9-18cb-4c60-8ff7-80309f3eb886\") " pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.211086 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40033fe9-18cb-4c60-8ff7-80309f3eb886-utilities\") pod \"certified-operators-8s7nw\" (UID: \"40033fe9-18cb-4c60-8ff7-80309f3eb886\") " pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.242191 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vhtm\" (UniqueName: \"kubernetes.io/projected/40033fe9-18cb-4c60-8ff7-80309f3eb886-kube-api-access-7vhtm\") pod \"certified-operators-8s7nw\" (UID: \"40033fe9-18cb-4c60-8ff7-80309f3eb886\") " pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.314186 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.732463 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-67485c4bf6-7xmf2" Feb 01 07:39:08 crc kubenswrapper[4650]: I0201 07:39:08.889990 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8s7nw"] Feb 01 07:39:09 crc kubenswrapper[4650]: I0201 07:39:09.014664 4650 generic.go:334] "Generic (PLEG): container finished" podID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerID="a4559927c25b5172e0bb51589b156030237e4552bdad01ea0a510262dabc0be0" exitCode=0 Feb 01 07:39:09 crc kubenswrapper[4650]: I0201 07:39:09.014726 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerDied","Data":"a4559927c25b5172e0bb51589b156030237e4552bdad01ea0a510262dabc0be0"} Feb 01 07:39:09 crc kubenswrapper[4650]: I0201 07:39:09.014774 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"70e34c59087428be1d52cbbc9d3e74901ae2b55868cca05d2ac2b1cb47ec233d"} Feb 01 07:39:09 crc kubenswrapper[4650]: I0201 07:39:09.014792 4650 scope.go:117] "RemoveContainer" containerID="7e65f3cb8796ac73cb10f7e5fd38e53569d05a3301373f4ee87f64447301307a" Feb 01 07:39:09 crc kubenswrapper[4650]: I0201 07:39:09.018335 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f7lrk" event={"ID":"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3","Type":"ContainerStarted","Data":"1fdfbe41c1b1ca583c4fbbe11df60dfffc2709b03b086bdc5a14ab72763c583d"} Feb 01 07:39:09 crc kubenswrapper[4650]: I0201 07:39:09.019908 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8s7nw" event={"ID":"40033fe9-18cb-4c60-8ff7-80309f3eb886","Type":"ContainerStarted","Data":"fca9105628f6e735a061e59b2c2237e9e1dfe67e0f20548b7fe146d89970ce4c"} Feb 01 07:39:10 crc kubenswrapper[4650]: I0201 07:39:10.030501 4650 generic.go:334] "Generic (PLEG): container finished" podID="40033fe9-18cb-4c60-8ff7-80309f3eb886" containerID="60c1920085a25c9ccdb3a7557fa9cdda3ef100b3b32116126d3f74604a3db012" exitCode=0 Feb 01 07:39:10 crc kubenswrapper[4650]: I0201 07:39:10.031093 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8s7nw" event={"ID":"40033fe9-18cb-4c60-8ff7-80309f3eb886","Type":"ContainerDied","Data":"60c1920085a25c9ccdb3a7557fa9cdda3ef100b3b32116126d3f74604a3db012"} Feb 01 07:39:10 crc kubenswrapper[4650]: I0201 07:39:10.035161 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf" event={"ID":"e82b6e7a-07b2-4ad3-a94e-70a7c398a401","Type":"ContainerStarted","Data":"2f1101809afa2cb9a27f733368d6268b1fc9ea4dad900eac2c041a42823cea80"} Feb 01 07:39:10 crc kubenswrapper[4650]: I0201 07:39:10.035617 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf" Feb 01 07:39:10 crc kubenswrapper[4650]: I0201 07:39:10.059055 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-f7lrk" 
podStartSLOduration=12.678115071 podStartE2EDuration="20.059016054s" podCreationTimestamp="2026-02-01 07:38:50 +0000 UTC" firstStartedPulling="2026-02-01 07:39:01.007610982 +0000 UTC m=+939.730709267" lastFinishedPulling="2026-02-01 07:39:08.388512005 +0000 UTC m=+947.111610250" observedRunningTime="2026-02-01 07:39:09.082753713 +0000 UTC m=+947.805851968" watchObservedRunningTime="2026-02-01 07:39:10.059016054 +0000 UTC m=+948.782114289" Feb 01 07:39:10 crc kubenswrapper[4650]: I0201 07:39:10.088478 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf" podStartSLOduration=4.199592375 podStartE2EDuration="45.088442132s" podCreationTimestamp="2026-02-01 07:38:25 +0000 UTC" firstStartedPulling="2026-02-01 07:38:28.576807976 +0000 UTC m=+907.299906221" lastFinishedPulling="2026-02-01 07:39:09.465657733 +0000 UTC m=+948.188755978" observedRunningTime="2026-02-01 07:39:10.087445825 +0000 UTC m=+948.810544070" watchObservedRunningTime="2026-02-01 07:39:10.088442132 +0000 UTC m=+948.811540377" Feb 01 07:39:10 crc kubenswrapper[4650]: I0201 07:39:10.461246 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:39:10 crc kubenswrapper[4650]: I0201 07:39:10.461290 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:39:11 crc kubenswrapper[4650]: I0201 07:39:11.078734 4650 generic.go:334] "Generic (PLEG): container finished" podID="40033fe9-18cb-4c60-8ff7-80309f3eb886" containerID="c8958b2e9f15a070aa7ab43d34afc6d0776633d62b858067fc63e25a9f00d6ce" exitCode=0 Feb 01 07:39:11 crc kubenswrapper[4650]: I0201 07:39:11.078849 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8s7nw" event={"ID":"40033fe9-18cb-4c60-8ff7-80309f3eb886","Type":"ContainerDied","Data":"c8958b2e9f15a070aa7ab43d34afc6d0776633d62b858067fc63e25a9f00d6ce"} Feb 01 07:39:11 crc kubenswrapper[4650]: I0201 07:39:11.509430 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-f7lrk" podUID="f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" containerName="registry-server" probeResult="failure" output=< Feb 01 07:39:11 crc kubenswrapper[4650]: timeout: failed to connect service ":50051" within 1s Feb 01 07:39:11 crc kubenswrapper[4650]: > Feb 01 07:39:11 crc kubenswrapper[4650]: I0201 07:39:11.766513 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-79955696d6-v2tmb" Feb 01 07:39:12 crc kubenswrapper[4650]: I0201 07:39:12.107634 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9" Feb 01 07:39:15 crc kubenswrapper[4650]: I0201 07:39:15.107360 4650 generic.go:334] "Generic (PLEG): container finished" podID="6e914514-b71c-4747-8dfc-ae1eeef3c8a3" containerID="2e931a39bdd09a3f60ebed0e8b657013c8694cd6ccbad4e7b0f56c6abcc5e4c6" exitCode=0 Feb 01 07:39:15 crc kubenswrapper[4650]: I0201 07:39:15.107469 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p6dnj" event={"ID":"6e914514-b71c-4747-8dfc-ae1eeef3c8a3","Type":"ContainerDied","Data":"2e931a39bdd09a3f60ebed0e8b657013c8694cd6ccbad4e7b0f56c6abcc5e4c6"} Feb 01 07:39:15 crc kubenswrapper[4650]: I0201 07:39:15.111286 4650 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8s7nw" event={"ID":"40033fe9-18cb-4c60-8ff7-80309f3eb886","Type":"ContainerStarted","Data":"9c07a87f86c29225036c52a5014a92ae23862f34cb56aa47c597aa3252889f98"} Feb 01 07:39:15 crc kubenswrapper[4650]: I0201 07:39:15.160366 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8s7nw" podStartSLOduration=3.783217563 podStartE2EDuration="8.160342272s" podCreationTimestamp="2026-02-01 07:39:07 +0000 UTC" firstStartedPulling="2026-02-01 07:39:10.033371116 +0000 UTC m=+948.756469361" lastFinishedPulling="2026-02-01 07:39:14.410495825 +0000 UTC m=+953.133594070" observedRunningTime="2026-02-01 07:39:15.152136355 +0000 UTC m=+953.875234620" watchObservedRunningTime="2026-02-01 07:39:15.160342272 +0000 UTC m=+953.883440527" Feb 01 07:39:15 crc kubenswrapper[4650]: I0201 07:39:15.709266 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-9wdtd" Feb 01 07:39:15 crc kubenswrapper[4650]: I0201 07:39:15.796952 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-zg5gd" Feb 01 07:39:16 crc kubenswrapper[4650]: I0201 07:39:16.040839 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc" Feb 01 07:39:16 crc kubenswrapper[4650]: I0201 07:39:16.042815 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-z2hrc" Feb 01 07:39:16 crc kubenswrapper[4650]: I0201 07:39:16.120967 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p6dnj" event={"ID":"6e914514-b71c-4747-8dfc-ae1eeef3c8a3","Type":"ContainerStarted","Data":"7caa106e12af05f65e30050456d10098db7391166b65a4eae02737644bbf25a8"} Feb 01 07:39:16 crc kubenswrapper[4650]: I0201 07:39:16.142436 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-p6dnj" podStartSLOduration=5.632783163 podStartE2EDuration="14.142415207s" podCreationTimestamp="2026-02-01 07:39:02 +0000 UTC" firstStartedPulling="2026-02-01 07:39:06.974251789 +0000 UTC m=+945.697350034" lastFinishedPulling="2026-02-01 07:39:15.483883833 +0000 UTC m=+954.206982078" observedRunningTime="2026-02-01 07:39:16.138431171 +0000 UTC m=+954.861529426" watchObservedRunningTime="2026-02-01 07:39:16.142415207 +0000 UTC m=+954.865513472" Feb 01 07:39:16 crc kubenswrapper[4650]: I0201 07:39:16.203807 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-74flx" Feb 01 07:39:16 crc kubenswrapper[4650]: I0201 07:39:16.285172 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-ktrgf" Feb 01 07:39:16 crc kubenswrapper[4650]: I0201 07:39:16.368689 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-jbbh5" Feb 01 07:39:18 crc kubenswrapper[4650]: I0201 07:39:18.314301 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:18 crc 
kubenswrapper[4650]: I0201 07:39:18.314595 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:19 crc kubenswrapper[4650]: I0201 07:39:19.366980 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-8s7nw" podUID="40033fe9-18cb-4c60-8ff7-80309f3eb886" containerName="registry-server" probeResult="failure" output=< Feb 01 07:39:19 crc kubenswrapper[4650]: timeout: failed to connect service ":50051" within 1s Feb 01 07:39:19 crc kubenswrapper[4650]: > Feb 01 07:39:20 crc kubenswrapper[4650]: I0201 07:39:20.516724 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:39:20 crc kubenswrapper[4650]: I0201 07:39:20.578365 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:39:21 crc kubenswrapper[4650]: I0201 07:39:21.328462 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-f7lrk"] Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.166247 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-f7lrk" podUID="f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" containerName="registry-server" containerID="cri-o://1fdfbe41c1b1ca583c4fbbe11df60dfffc2709b03b086bdc5a14ab72763c583d" gracePeriod=2 Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.534318 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.534367 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.540189 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.592154 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.634463 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-utilities\") pod \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\" (UID: \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\") " Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.634619 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rdp5j\" (UniqueName: \"kubernetes.io/projected/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-kube-api-access-rdp5j\") pod \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\" (UID: \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\") " Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.634646 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-catalog-content\") pod \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\" (UID: \"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3\") " Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.635328 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-utilities" (OuterVolumeSpecName: "utilities") pod "f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" (UID: "f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.636017 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.645566 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-kube-api-access-rdp5j" (OuterVolumeSpecName: "kube-api-access-rdp5j") pod "f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" (UID: "f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3"). InnerVolumeSpecName "kube-api-access-rdp5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.737755 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rdp5j\" (UniqueName: \"kubernetes.io/projected/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-kube-api-access-rdp5j\") on node \"crc\" DevicePath \"\"" Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.759567 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" (UID: "f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:39:22 crc kubenswrapper[4650]: I0201 07:39:22.838383 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.179446 4650 generic.go:334] "Generic (PLEG): container finished" podID="f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" containerID="1fdfbe41c1b1ca583c4fbbe11df60dfffc2709b03b086bdc5a14ab72763c583d" exitCode=0 Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.179506 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f7lrk" Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.179637 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f7lrk" event={"ID":"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3","Type":"ContainerDied","Data":"1fdfbe41c1b1ca583c4fbbe11df60dfffc2709b03b086bdc5a14ab72763c583d"} Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.179703 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f7lrk" event={"ID":"f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3","Type":"ContainerDied","Data":"0911270cdbfd7c0337d4770f5ca97e237b99275dc2be9e64aecbc3f1faec66c6"} Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.179732 4650 scope.go:117] "RemoveContainer" containerID="1fdfbe41c1b1ca583c4fbbe11df60dfffc2709b03b086bdc5a14ab72763c583d" Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.225743 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-f7lrk"] Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.235390 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-f7lrk"] Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.235741 4650 scope.go:117] "RemoveContainer" containerID="97835c02b38737e5b59fe8627325a21cf9b6c523c853c34ddcf81e76351eec3c" Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.264145 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-p6dnj" Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.266262 4650 scope.go:117] "RemoveContainer" containerID="676d0469fea3b0596b2295be1259e49fd303afc1234f3e7bc0e19108691ec77b" Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.319423 4650 scope.go:117] "RemoveContainer" containerID="1fdfbe41c1b1ca583c4fbbe11df60dfffc2709b03b086bdc5a14ab72763c583d" Feb 01 07:39:23 crc kubenswrapper[4650]: E0201 07:39:23.321368 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fdfbe41c1b1ca583c4fbbe11df60dfffc2709b03b086bdc5a14ab72763c583d\": container with ID starting with 1fdfbe41c1b1ca583c4fbbe11df60dfffc2709b03b086bdc5a14ab72763c583d not found: ID does not exist" containerID="1fdfbe41c1b1ca583c4fbbe11df60dfffc2709b03b086bdc5a14ab72763c583d" Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.321415 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fdfbe41c1b1ca583c4fbbe11df60dfffc2709b03b086bdc5a14ab72763c583d"} err="failed to get container status \"1fdfbe41c1b1ca583c4fbbe11df60dfffc2709b03b086bdc5a14ab72763c583d\": rpc error: code = NotFound desc = could not find container 
\"1fdfbe41c1b1ca583c4fbbe11df60dfffc2709b03b086bdc5a14ab72763c583d\": container with ID starting with 1fdfbe41c1b1ca583c4fbbe11df60dfffc2709b03b086bdc5a14ab72763c583d not found: ID does not exist" Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.321447 4650 scope.go:117] "RemoveContainer" containerID="97835c02b38737e5b59fe8627325a21cf9b6c523c853c34ddcf81e76351eec3c" Feb 01 07:39:23 crc kubenswrapper[4650]: E0201 07:39:23.321849 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97835c02b38737e5b59fe8627325a21cf9b6c523c853c34ddcf81e76351eec3c\": container with ID starting with 97835c02b38737e5b59fe8627325a21cf9b6c523c853c34ddcf81e76351eec3c not found: ID does not exist" containerID="97835c02b38737e5b59fe8627325a21cf9b6c523c853c34ddcf81e76351eec3c" Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.321886 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97835c02b38737e5b59fe8627325a21cf9b6c523c853c34ddcf81e76351eec3c"} err="failed to get container status \"97835c02b38737e5b59fe8627325a21cf9b6c523c853c34ddcf81e76351eec3c\": rpc error: code = NotFound desc = could not find container \"97835c02b38737e5b59fe8627325a21cf9b6c523c853c34ddcf81e76351eec3c\": container with ID starting with 97835c02b38737e5b59fe8627325a21cf9b6c523c853c34ddcf81e76351eec3c not found: ID does not exist" Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.321909 4650 scope.go:117] "RemoveContainer" containerID="676d0469fea3b0596b2295be1259e49fd303afc1234f3e7bc0e19108691ec77b" Feb 01 07:39:23 crc kubenswrapper[4650]: E0201 07:39:23.322335 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"676d0469fea3b0596b2295be1259e49fd303afc1234f3e7bc0e19108691ec77b\": container with ID starting with 676d0469fea3b0596b2295be1259e49fd303afc1234f3e7bc0e19108691ec77b not found: ID does not exist" containerID="676d0469fea3b0596b2295be1259e49fd303afc1234f3e7bc0e19108691ec77b" Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.322374 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"676d0469fea3b0596b2295be1259e49fd303afc1234f3e7bc0e19108691ec77b"} err="failed to get container status \"676d0469fea3b0596b2295be1259e49fd303afc1234f3e7bc0e19108691ec77b\": rpc error: code = NotFound desc = could not find container \"676d0469fea3b0596b2295be1259e49fd303afc1234f3e7bc0e19108691ec77b\": container with ID starting with 676d0469fea3b0596b2295be1259e49fd303afc1234f3e7bc0e19108691ec77b not found: ID does not exist" Feb 01 07:39:23 crc kubenswrapper[4650]: I0201 07:39:23.980776 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" path="/var/lib/kubelet/pods/f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3/volumes" Feb 01 07:39:24 crc kubenswrapper[4650]: I0201 07:39:24.571951 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p6dnj"] Feb 01 07:39:24 crc kubenswrapper[4650]: I0201 07:39:24.931989 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-llp7t"] Feb 01 07:39:24 crc kubenswrapper[4650]: I0201 07:39:24.932420 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-llp7t" podUID="e223410d-6b2d-464a-8e86-4355dbf698b2" containerName="registry-server" 
containerID="cri-o://35b4b9a383e2198158a1b51014f313d213a35cb1ab6d89b9c3c4876fd49e3c50" gracePeriod=2 Feb 01 07:39:25 crc kubenswrapper[4650]: I0201 07:39:25.207178 4650 generic.go:334] "Generic (PLEG): container finished" podID="e223410d-6b2d-464a-8e86-4355dbf698b2" containerID="35b4b9a383e2198158a1b51014f313d213a35cb1ab6d89b9c3c4876fd49e3c50" exitCode=0 Feb 01 07:39:25 crc kubenswrapper[4650]: I0201 07:39:25.207402 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llp7t" event={"ID":"e223410d-6b2d-464a-8e86-4355dbf698b2","Type":"ContainerDied","Data":"35b4b9a383e2198158a1b51014f313d213a35cb1ab6d89b9c3c4876fd49e3c50"} Feb 01 07:39:25 crc kubenswrapper[4650]: I0201 07:39:25.342279 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:39:25 crc kubenswrapper[4650]: I0201 07:39:25.477937 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlkbr\" (UniqueName: \"kubernetes.io/projected/e223410d-6b2d-464a-8e86-4355dbf698b2-kube-api-access-rlkbr\") pod \"e223410d-6b2d-464a-8e86-4355dbf698b2\" (UID: \"e223410d-6b2d-464a-8e86-4355dbf698b2\") " Feb 01 07:39:25 crc kubenswrapper[4650]: I0201 07:39:25.478000 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e223410d-6b2d-464a-8e86-4355dbf698b2-utilities\") pod \"e223410d-6b2d-464a-8e86-4355dbf698b2\" (UID: \"e223410d-6b2d-464a-8e86-4355dbf698b2\") " Feb 01 07:39:25 crc kubenswrapper[4650]: I0201 07:39:25.478123 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e223410d-6b2d-464a-8e86-4355dbf698b2-catalog-content\") pod \"e223410d-6b2d-464a-8e86-4355dbf698b2\" (UID: \"e223410d-6b2d-464a-8e86-4355dbf698b2\") " Feb 01 07:39:25 crc kubenswrapper[4650]: I0201 07:39:25.480935 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e223410d-6b2d-464a-8e86-4355dbf698b2-utilities" (OuterVolumeSpecName: "utilities") pod "e223410d-6b2d-464a-8e86-4355dbf698b2" (UID: "e223410d-6b2d-464a-8e86-4355dbf698b2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:39:25 crc kubenswrapper[4650]: I0201 07:39:25.483893 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e223410d-6b2d-464a-8e86-4355dbf698b2-kube-api-access-rlkbr" (OuterVolumeSpecName: "kube-api-access-rlkbr") pod "e223410d-6b2d-464a-8e86-4355dbf698b2" (UID: "e223410d-6b2d-464a-8e86-4355dbf698b2"). InnerVolumeSpecName "kube-api-access-rlkbr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:39:25 crc kubenswrapper[4650]: I0201 07:39:25.540788 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e223410d-6b2d-464a-8e86-4355dbf698b2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e223410d-6b2d-464a-8e86-4355dbf698b2" (UID: "e223410d-6b2d-464a-8e86-4355dbf698b2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:39:25 crc kubenswrapper[4650]: I0201 07:39:25.579802 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlkbr\" (UniqueName: \"kubernetes.io/projected/e223410d-6b2d-464a-8e86-4355dbf698b2-kube-api-access-rlkbr\") on node \"crc\" DevicePath \"\"" Feb 01 07:39:25 crc kubenswrapper[4650]: I0201 07:39:25.580320 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e223410d-6b2d-464a-8e86-4355dbf698b2-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:39:25 crc kubenswrapper[4650]: I0201 07:39:25.580405 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e223410d-6b2d-464a-8e86-4355dbf698b2-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:39:26 crc kubenswrapper[4650]: I0201 07:39:26.220407 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llp7t" event={"ID":"e223410d-6b2d-464a-8e86-4355dbf698b2","Type":"ContainerDied","Data":"1f142c3619a581b2911ba26eba4650027b87b48c263daf50e2d0c4e6eaa19195"} Feb 01 07:39:26 crc kubenswrapper[4650]: I0201 07:39:26.220476 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-llp7t" Feb 01 07:39:26 crc kubenswrapper[4650]: I0201 07:39:26.220778 4650 scope.go:117] "RemoveContainer" containerID="35b4b9a383e2198158a1b51014f313d213a35cb1ab6d89b9c3c4876fd49e3c50" Feb 01 07:39:26 crc kubenswrapper[4650]: I0201 07:39:26.252252 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-llp7t"] Feb 01 07:39:26 crc kubenswrapper[4650]: I0201 07:39:26.260183 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-llp7t"] Feb 01 07:39:26 crc kubenswrapper[4650]: I0201 07:39:26.266042 4650 scope.go:117] "RemoveContainer" containerID="cc02dbec7219a84173eca6702ab1ee07d02e86b0b429ba3ff0f0e99e6f76caaf" Feb 01 07:39:26 crc kubenswrapper[4650]: I0201 07:39:26.302918 4650 scope.go:117] "RemoveContainer" containerID="a5d911ab05f50f3861d02e496c5c922a3b14926fd707450d668eaefe4635c1df" Feb 01 07:39:27 crc kubenswrapper[4650]: I0201 07:39:27.973967 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e223410d-6b2d-464a-8e86-4355dbf698b2" path="/var/lib/kubelet/pods/e223410d-6b2d-464a-8e86-4355dbf698b2/volumes" Feb 01 07:39:28 crc kubenswrapper[4650]: I0201 07:39:28.399235 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:28 crc kubenswrapper[4650]: I0201 07:39:28.459939 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:30 crc kubenswrapper[4650]: I0201 07:39:30.732846 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8s7nw"] Feb 01 07:39:30 crc kubenswrapper[4650]: I0201 07:39:30.734574 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8s7nw" podUID="40033fe9-18cb-4c60-8ff7-80309f3eb886" containerName="registry-server" containerID="cri-o://9c07a87f86c29225036c52a5014a92ae23862f34cb56aa47c597aa3252889f98" gracePeriod=2 Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.198563 4650 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.256280 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vhtm\" (UniqueName: \"kubernetes.io/projected/40033fe9-18cb-4c60-8ff7-80309f3eb886-kube-api-access-7vhtm\") pod \"40033fe9-18cb-4c60-8ff7-80309f3eb886\" (UID: \"40033fe9-18cb-4c60-8ff7-80309f3eb886\") " Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.256422 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40033fe9-18cb-4c60-8ff7-80309f3eb886-catalog-content\") pod \"40033fe9-18cb-4c60-8ff7-80309f3eb886\" (UID: \"40033fe9-18cb-4c60-8ff7-80309f3eb886\") " Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.256487 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40033fe9-18cb-4c60-8ff7-80309f3eb886-utilities\") pod \"40033fe9-18cb-4c60-8ff7-80309f3eb886\" (UID: \"40033fe9-18cb-4c60-8ff7-80309f3eb886\") " Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.260196 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40033fe9-18cb-4c60-8ff7-80309f3eb886-utilities" (OuterVolumeSpecName: "utilities") pod "40033fe9-18cb-4c60-8ff7-80309f3eb886" (UID: "40033fe9-18cb-4c60-8ff7-80309f3eb886"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.261383 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40033fe9-18cb-4c60-8ff7-80309f3eb886-kube-api-access-7vhtm" (OuterVolumeSpecName: "kube-api-access-7vhtm") pod "40033fe9-18cb-4c60-8ff7-80309f3eb886" (UID: "40033fe9-18cb-4c60-8ff7-80309f3eb886"). InnerVolumeSpecName "kube-api-access-7vhtm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.278259 4650 generic.go:334] "Generic (PLEG): container finished" podID="40033fe9-18cb-4c60-8ff7-80309f3eb886" containerID="9c07a87f86c29225036c52a5014a92ae23862f34cb56aa47c597aa3252889f98" exitCode=0 Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.278299 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8s7nw" event={"ID":"40033fe9-18cb-4c60-8ff7-80309f3eb886","Type":"ContainerDied","Data":"9c07a87f86c29225036c52a5014a92ae23862f34cb56aa47c597aa3252889f98"} Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.278340 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8s7nw" event={"ID":"40033fe9-18cb-4c60-8ff7-80309f3eb886","Type":"ContainerDied","Data":"fca9105628f6e735a061e59b2c2237e9e1dfe67e0f20548b7fe146d89970ce4c"} Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.278356 4650 scope.go:117] "RemoveContainer" containerID="9c07a87f86c29225036c52a5014a92ae23862f34cb56aa47c597aa3252889f98" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.278494 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8s7nw" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.302917 4650 scope.go:117] "RemoveContainer" containerID="c8958b2e9f15a070aa7ab43d34afc6d0776633d62b858067fc63e25a9f00d6ce" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.324265 4650 scope.go:117] "RemoveContainer" containerID="60c1920085a25c9ccdb3a7557fa9cdda3ef100b3b32116126d3f74604a3db012" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.324881 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40033fe9-18cb-4c60-8ff7-80309f3eb886-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "40033fe9-18cb-4c60-8ff7-80309f3eb886" (UID: "40033fe9-18cb-4c60-8ff7-80309f3eb886"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.341101 4650 scope.go:117] "RemoveContainer" containerID="9c07a87f86c29225036c52a5014a92ae23862f34cb56aa47c597aa3252889f98" Feb 01 07:39:31 crc kubenswrapper[4650]: E0201 07:39:31.342043 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c07a87f86c29225036c52a5014a92ae23862f34cb56aa47c597aa3252889f98\": container with ID starting with 9c07a87f86c29225036c52a5014a92ae23862f34cb56aa47c597aa3252889f98 not found: ID does not exist" containerID="9c07a87f86c29225036c52a5014a92ae23862f34cb56aa47c597aa3252889f98" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.342147 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c07a87f86c29225036c52a5014a92ae23862f34cb56aa47c597aa3252889f98"} err="failed to get container status \"9c07a87f86c29225036c52a5014a92ae23862f34cb56aa47c597aa3252889f98\": rpc error: code = NotFound desc = could not find container \"9c07a87f86c29225036c52a5014a92ae23862f34cb56aa47c597aa3252889f98\": container with ID starting with 9c07a87f86c29225036c52a5014a92ae23862f34cb56aa47c597aa3252889f98 not found: ID does not exist" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.342237 4650 scope.go:117] "RemoveContainer" containerID="c8958b2e9f15a070aa7ab43d34afc6d0776633d62b858067fc63e25a9f00d6ce" Feb 01 07:39:31 crc kubenswrapper[4650]: E0201 07:39:31.345349 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8958b2e9f15a070aa7ab43d34afc6d0776633d62b858067fc63e25a9f00d6ce\": container with ID starting with c8958b2e9f15a070aa7ab43d34afc6d0776633d62b858067fc63e25a9f00d6ce not found: ID does not exist" containerID="c8958b2e9f15a070aa7ab43d34afc6d0776633d62b858067fc63e25a9f00d6ce" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.345406 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8958b2e9f15a070aa7ab43d34afc6d0776633d62b858067fc63e25a9f00d6ce"} err="failed to get container status \"c8958b2e9f15a070aa7ab43d34afc6d0776633d62b858067fc63e25a9f00d6ce\": rpc error: code = NotFound desc = could not find container \"c8958b2e9f15a070aa7ab43d34afc6d0776633d62b858067fc63e25a9f00d6ce\": container with ID starting with c8958b2e9f15a070aa7ab43d34afc6d0776633d62b858067fc63e25a9f00d6ce not found: ID does not exist" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.345448 4650 scope.go:117] "RemoveContainer" containerID="60c1920085a25c9ccdb3a7557fa9cdda3ef100b3b32116126d3f74604a3db012" Feb 01 07:39:31 crc 
kubenswrapper[4650]: E0201 07:39:31.345767 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60c1920085a25c9ccdb3a7557fa9cdda3ef100b3b32116126d3f74604a3db012\": container with ID starting with 60c1920085a25c9ccdb3a7557fa9cdda3ef100b3b32116126d3f74604a3db012 not found: ID does not exist" containerID="60c1920085a25c9ccdb3a7557fa9cdda3ef100b3b32116126d3f74604a3db012" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.345805 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60c1920085a25c9ccdb3a7557fa9cdda3ef100b3b32116126d3f74604a3db012"} err="failed to get container status \"60c1920085a25c9ccdb3a7557fa9cdda3ef100b3b32116126d3f74604a3db012\": rpc error: code = NotFound desc = could not find container \"60c1920085a25c9ccdb3a7557fa9cdda3ef100b3b32116126d3f74604a3db012\": container with ID starting with 60c1920085a25c9ccdb3a7557fa9cdda3ef100b3b32116126d3f74604a3db012 not found: ID does not exist" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.357905 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40033fe9-18cb-4c60-8ff7-80309f3eb886-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.357928 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40033fe9-18cb-4c60-8ff7-80309f3eb886-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.357937 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vhtm\" (UniqueName: \"kubernetes.io/projected/40033fe9-18cb-4c60-8ff7-80309f3eb886-kube-api-access-7vhtm\") on node \"crc\" DevicePath \"\"" Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.605428 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8s7nw"] Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.610872 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8s7nw"] Feb 01 07:39:31 crc kubenswrapper[4650]: I0201 07:39:31.976315 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40033fe9-18cb-4c60-8ff7-80309f3eb886" path="/var/lib/kubelet/pods/40033fe9-18cb-4c60-8ff7-80309f3eb886/volumes" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.974925 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bmdn"] Feb 01 07:39:32 crc kubenswrapper[4650]: E0201 07:39:32.978906 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e223410d-6b2d-464a-8e86-4355dbf698b2" containerName="extract-utilities" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.978995 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="e223410d-6b2d-464a-8e86-4355dbf698b2" containerName="extract-utilities" Feb 01 07:39:32 crc kubenswrapper[4650]: E0201 07:39:32.979101 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40033fe9-18cb-4c60-8ff7-80309f3eb886" containerName="extract-content" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.979155 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="40033fe9-18cb-4c60-8ff7-80309f3eb886" containerName="extract-content" Feb 01 07:39:32 crc kubenswrapper[4650]: E0201 07:39:32.979214 4650 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" containerName="extract-content" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.979272 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" containerName="extract-content" Feb 01 07:39:32 crc kubenswrapper[4650]: E0201 07:39:32.979335 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" containerName="registry-server" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.979384 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" containerName="registry-server" Feb 01 07:39:32 crc kubenswrapper[4650]: E0201 07:39:32.979440 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e223410d-6b2d-464a-8e86-4355dbf698b2" containerName="registry-server" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.979487 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="e223410d-6b2d-464a-8e86-4355dbf698b2" containerName="registry-server" Feb 01 07:39:32 crc kubenswrapper[4650]: E0201 07:39:32.979548 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e223410d-6b2d-464a-8e86-4355dbf698b2" containerName="extract-content" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.979603 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="e223410d-6b2d-464a-8e86-4355dbf698b2" containerName="extract-content" Feb 01 07:39:32 crc kubenswrapper[4650]: E0201 07:39:32.979665 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" containerName="extract-utilities" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.979714 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" containerName="extract-utilities" Feb 01 07:39:32 crc kubenswrapper[4650]: E0201 07:39:32.979767 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40033fe9-18cb-4c60-8ff7-80309f3eb886" containerName="extract-utilities" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.979817 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="40033fe9-18cb-4c60-8ff7-80309f3eb886" containerName="extract-utilities" Feb 01 07:39:32 crc kubenswrapper[4650]: E0201 07:39:32.979873 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40033fe9-18cb-4c60-8ff7-80309f3eb886" containerName="registry-server" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.979925 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="40033fe9-18cb-4c60-8ff7-80309f3eb886" containerName="registry-server" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.980122 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="40033fe9-18cb-4c60-8ff7-80309f3eb886" containerName="registry-server" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.980191 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="e223410d-6b2d-464a-8e86-4355dbf698b2" containerName="registry-server" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.980248 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="f248938c-a9ff-46b0-b4f9-a0b5cb5dd1f3" containerName="registry-server" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.981035 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4bmdn" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.985363 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.985535 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.985740 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-7kvs4" Feb 01 07:39:32 crc kubenswrapper[4650]: I0201 07:39:32.985836 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.000833 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bmdn"] Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.082793 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/061abe73-8375-4525-914a-b5e06db7e5db-config\") pod \"dnsmasq-dns-675f4bcbfc-4bmdn\" (UID: \"061abe73-8375-4525-914a-b5e06db7e5db\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bmdn" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.082892 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89ttl\" (UniqueName: \"kubernetes.io/projected/061abe73-8375-4525-914a-b5e06db7e5db-kube-api-access-89ttl\") pod \"dnsmasq-dns-675f4bcbfc-4bmdn\" (UID: \"061abe73-8375-4525-914a-b5e06db7e5db\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bmdn" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.083479 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-4ldzd"] Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.084632 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.090483 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.095755 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-4ldzd"] Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.183949 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4747q\" (UniqueName: \"kubernetes.io/projected/748cc059-b05f-4d89-a669-b295e49a39bd-kube-api-access-4747q\") pod \"dnsmasq-dns-78dd6ddcc-4ldzd\" (UID: \"748cc059-b05f-4d89-a669-b295e49a39bd\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.184069 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89ttl\" (UniqueName: \"kubernetes.io/projected/061abe73-8375-4525-914a-b5e06db7e5db-kube-api-access-89ttl\") pod \"dnsmasq-dns-675f4bcbfc-4bmdn\" (UID: \"061abe73-8375-4525-914a-b5e06db7e5db\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bmdn" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.184193 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/748cc059-b05f-4d89-a669-b295e49a39bd-config\") pod \"dnsmasq-dns-78dd6ddcc-4ldzd\" (UID: \"748cc059-b05f-4d89-a669-b295e49a39bd\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.184345 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/748cc059-b05f-4d89-a669-b295e49a39bd-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-4ldzd\" (UID: \"748cc059-b05f-4d89-a669-b295e49a39bd\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.184399 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/061abe73-8375-4525-914a-b5e06db7e5db-config\") pod \"dnsmasq-dns-675f4bcbfc-4bmdn\" (UID: \"061abe73-8375-4525-914a-b5e06db7e5db\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bmdn" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.185173 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/061abe73-8375-4525-914a-b5e06db7e5db-config\") pod \"dnsmasq-dns-675f4bcbfc-4bmdn\" (UID: \"061abe73-8375-4525-914a-b5e06db7e5db\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bmdn" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.232940 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89ttl\" (UniqueName: \"kubernetes.io/projected/061abe73-8375-4525-914a-b5e06db7e5db-kube-api-access-89ttl\") pod \"dnsmasq-dns-675f4bcbfc-4bmdn\" (UID: \"061abe73-8375-4525-914a-b5e06db7e5db\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bmdn" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.285622 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/748cc059-b05f-4d89-a669-b295e49a39bd-config\") pod \"dnsmasq-dns-78dd6ddcc-4ldzd\" (UID: \"748cc059-b05f-4d89-a669-b295e49a39bd\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 
07:39:33.285693 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/748cc059-b05f-4d89-a669-b295e49a39bd-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-4ldzd\" (UID: \"748cc059-b05f-4d89-a669-b295e49a39bd\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.285724 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4747q\" (UniqueName: \"kubernetes.io/projected/748cc059-b05f-4d89-a669-b295e49a39bd-kube-api-access-4747q\") pod \"dnsmasq-dns-78dd6ddcc-4ldzd\" (UID: \"748cc059-b05f-4d89-a669-b295e49a39bd\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.286690 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/748cc059-b05f-4d89-a669-b295e49a39bd-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-4ldzd\" (UID: \"748cc059-b05f-4d89-a669-b295e49a39bd\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.286688 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/748cc059-b05f-4d89-a669-b295e49a39bd-config\") pod \"dnsmasq-dns-78dd6ddcc-4ldzd\" (UID: \"748cc059-b05f-4d89-a669-b295e49a39bd\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.301238 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4bmdn" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.324964 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4747q\" (UniqueName: \"kubernetes.io/projected/748cc059-b05f-4d89-a669-b295e49a39bd-kube-api-access-4747q\") pod \"dnsmasq-dns-78dd6ddcc-4ldzd\" (UID: \"748cc059-b05f-4d89-a669-b295e49a39bd\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.405472 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.870835 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bmdn"] Feb 01 07:39:33 crc kubenswrapper[4650]: W0201 07:39:33.872582 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod061abe73_8375_4525_914a_b5e06db7e5db.slice/crio-0aea3ae370c0cc28f4f7e80b22b5132285ce7fcf6a8b16d64b2f2cfebff7991e WatchSource:0}: Error finding container 0aea3ae370c0cc28f4f7e80b22b5132285ce7fcf6a8b16d64b2f2cfebff7991e: Status 404 returned error can't find the container with id 0aea3ae370c0cc28f4f7e80b22b5132285ce7fcf6a8b16d64b2f2cfebff7991e Feb 01 07:39:33 crc kubenswrapper[4650]: I0201 07:39:33.931125 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-4ldzd"] Feb 01 07:39:34 crc kubenswrapper[4650]: I0201 07:39:34.303204 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-4bmdn" event={"ID":"061abe73-8375-4525-914a-b5e06db7e5db","Type":"ContainerStarted","Data":"0aea3ae370c0cc28f4f7e80b22b5132285ce7fcf6a8b16d64b2f2cfebff7991e"} Feb 01 07:39:34 crc kubenswrapper[4650]: I0201 07:39:34.304338 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" event={"ID":"748cc059-b05f-4d89-a669-b295e49a39bd","Type":"ContainerStarted","Data":"ee1abde9a68e76325ef57f9308ac55282a1395b6f2b8487c768e7bf5fe5a2abb"} Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.031883 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bmdn"] Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.038304 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-cj2gg"] Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.039618 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.056978 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-cj2gg"] Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.168106 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81f3af79-80fc-4bd4-a429-82d9f42ef456-dns-svc\") pod \"dnsmasq-dns-666b6646f7-cj2gg\" (UID: \"81f3af79-80fc-4bd4-a429-82d9f42ef456\") " pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.168150 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81f3af79-80fc-4bd4-a429-82d9f42ef456-config\") pod \"dnsmasq-dns-666b6646f7-cj2gg\" (UID: \"81f3af79-80fc-4bd4-a429-82d9f42ef456\") " pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.168217 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnhp4\" (UniqueName: \"kubernetes.io/projected/81f3af79-80fc-4bd4-a429-82d9f42ef456-kube-api-access-qnhp4\") pod \"dnsmasq-dns-666b6646f7-cj2gg\" (UID: \"81f3af79-80fc-4bd4-a429-82d9f42ef456\") " pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.269261 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnhp4\" (UniqueName: \"kubernetes.io/projected/81f3af79-80fc-4bd4-a429-82d9f42ef456-kube-api-access-qnhp4\") pod \"dnsmasq-dns-666b6646f7-cj2gg\" (UID: \"81f3af79-80fc-4bd4-a429-82d9f42ef456\") " pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.269336 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81f3af79-80fc-4bd4-a429-82d9f42ef456-dns-svc\") pod \"dnsmasq-dns-666b6646f7-cj2gg\" (UID: \"81f3af79-80fc-4bd4-a429-82d9f42ef456\") " pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.269361 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81f3af79-80fc-4bd4-a429-82d9f42ef456-config\") pod \"dnsmasq-dns-666b6646f7-cj2gg\" (UID: \"81f3af79-80fc-4bd4-a429-82d9f42ef456\") " pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.270212 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81f3af79-80fc-4bd4-a429-82d9f42ef456-config\") pod \"dnsmasq-dns-666b6646f7-cj2gg\" (UID: \"81f3af79-80fc-4bd4-a429-82d9f42ef456\") " pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.270280 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81f3af79-80fc-4bd4-a429-82d9f42ef456-dns-svc\") pod \"dnsmasq-dns-666b6646f7-cj2gg\" (UID: \"81f3af79-80fc-4bd4-a429-82d9f42ef456\") " pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.292771 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnhp4\" (UniqueName: 
\"kubernetes.io/projected/81f3af79-80fc-4bd4-a429-82d9f42ef456-kube-api-access-qnhp4\") pod \"dnsmasq-dns-666b6646f7-cj2gg\" (UID: \"81f3af79-80fc-4bd4-a429-82d9f42ef456\") " pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.388746 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.439007 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-4ldzd"] Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.469301 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-bhfwv"] Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.470281 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.496301 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-bhfwv"] Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.575067 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2bab93e-0469-4ee5-841a-bca36667a835-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-bhfwv\" (UID: \"b2bab93e-0469-4ee5-841a-bca36667a835\") " pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.575100 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2bab93e-0469-4ee5-841a-bca36667a835-config\") pod \"dnsmasq-dns-57d769cc4f-bhfwv\" (UID: \"b2bab93e-0469-4ee5-841a-bca36667a835\") " pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.575141 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp9td\" (UniqueName: \"kubernetes.io/projected/b2bab93e-0469-4ee5-841a-bca36667a835-kube-api-access-rp9td\") pod \"dnsmasq-dns-57d769cc4f-bhfwv\" (UID: \"b2bab93e-0469-4ee5-841a-bca36667a835\") " pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.675888 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp9td\" (UniqueName: \"kubernetes.io/projected/b2bab93e-0469-4ee5-841a-bca36667a835-kube-api-access-rp9td\") pod \"dnsmasq-dns-57d769cc4f-bhfwv\" (UID: \"b2bab93e-0469-4ee5-841a-bca36667a835\") " pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.675996 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2bab93e-0469-4ee5-841a-bca36667a835-config\") pod \"dnsmasq-dns-57d769cc4f-bhfwv\" (UID: \"b2bab93e-0469-4ee5-841a-bca36667a835\") " pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.676016 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2bab93e-0469-4ee5-841a-bca36667a835-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-bhfwv\" (UID: \"b2bab93e-0469-4ee5-841a-bca36667a835\") " pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.677255 4650 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2bab93e-0469-4ee5-841a-bca36667a835-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-bhfwv\" (UID: \"b2bab93e-0469-4ee5-841a-bca36667a835\") " pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.677502 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2bab93e-0469-4ee5-841a-bca36667a835-config\") pod \"dnsmasq-dns-57d769cc4f-bhfwv\" (UID: \"b2bab93e-0469-4ee5-841a-bca36667a835\") " pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.711843 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp9td\" (UniqueName: \"kubernetes.io/projected/b2bab93e-0469-4ee5-841a-bca36667a835-kube-api-access-rp9td\") pod \"dnsmasq-dns-57d769cc4f-bhfwv\" (UID: \"b2bab93e-0469-4ee5-841a-bca36667a835\") " pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.826113 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" Feb 01 07:39:36 crc kubenswrapper[4650]: I0201 07:39:36.830876 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-cj2gg"] Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.236522 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.242396 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.244928 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.245842 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.246002 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.246102 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-b8zkd" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.246231 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.246321 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.246408 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.272228 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.289156 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8w4c\" (UniqueName: \"kubernetes.io/projected/722b2919-c0d6-4596-82cc-5ae2b5951263-kube-api-access-s8w4c\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.289284 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/722b2919-c0d6-4596-82cc-5ae2b5951263-server-conf\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.289625 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/722b2919-c0d6-4596-82cc-5ae2b5951263-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.289651 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/722b2919-c0d6-4596-82cc-5ae2b5951263-pod-info\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.289711 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/722b2919-c0d6-4596-82cc-5ae2b5951263-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.289774 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/722b2919-c0d6-4596-82cc-5ae2b5951263-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.289796 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/722b2919-c0d6-4596-82cc-5ae2b5951263-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.289840 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/722b2919-c0d6-4596-82cc-5ae2b5951263-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.289860 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.289875 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/722b2919-c0d6-4596-82cc-5ae2b5951263-config-data\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.290074 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/722b2919-c0d6-4596-82cc-5ae2b5951263-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.348923 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-bhfwv"] Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.388786 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" event={"ID":"81f3af79-80fc-4bd4-a429-82d9f42ef456","Type":"ContainerStarted","Data":"ac021193874234dcc4e9d2de7f07f3cacad5bc43e25043f8256262f0a484a698"} Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.391624 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8w4c\" (UniqueName: \"kubernetes.io/projected/722b2919-c0d6-4596-82cc-5ae2b5951263-kube-api-access-s8w4c\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.391676 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/722b2919-c0d6-4596-82cc-5ae2b5951263-server-conf\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.391706 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/722b2919-c0d6-4596-82cc-5ae2b5951263-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.391728 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/722b2919-c0d6-4596-82cc-5ae2b5951263-pod-info\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.391743 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/722b2919-c0d6-4596-82cc-5ae2b5951263-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.391767 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/722b2919-c0d6-4596-82cc-5ae2b5951263-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.391787 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/722b2919-c0d6-4596-82cc-5ae2b5951263-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.391802 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/722b2919-c0d6-4596-82cc-5ae2b5951263-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: 
\"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.391822 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.391838 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/722b2919-c0d6-4596-82cc-5ae2b5951263-config-data\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.391852 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/722b2919-c0d6-4596-82cc-5ae2b5951263-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.392629 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/722b2919-c0d6-4596-82cc-5ae2b5951263-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.393051 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/722b2919-c0d6-4596-82cc-5ae2b5951263-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.393699 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.394474 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/722b2919-c0d6-4596-82cc-5ae2b5951263-server-conf\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.395015 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/722b2919-c0d6-4596-82cc-5ae2b5951263-config-data\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.397668 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/722b2919-c0d6-4596-82cc-5ae2b5951263-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.401569 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/722b2919-c0d6-4596-82cc-5ae2b5951263-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.401818 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/722b2919-c0d6-4596-82cc-5ae2b5951263-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.403584 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/722b2919-c0d6-4596-82cc-5ae2b5951263-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.418311 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.418828 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/722b2919-c0d6-4596-82cc-5ae2b5951263-pod-info\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.427413 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8w4c\" (UniqueName: \"kubernetes.io/projected/722b2919-c0d6-4596-82cc-5ae2b5951263-kube-api-access-s8w4c\") pod \"rabbitmq-server-0\" (UID: \"722b2919-c0d6-4596-82cc-5ae2b5951263\") " pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.567366 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.636974 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.638104 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.650213 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-9cjwg" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.650372 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.650624 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.650851 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.650945 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.651062 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.652441 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.656292 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.696576 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9c378d90-fab5-4d68-9aba-892645206b97-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.696624 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9c378d90-fab5-4d68-9aba-892645206b97-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.696662 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9c378d90-fab5-4d68-9aba-892645206b97-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.696697 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9c378d90-fab5-4d68-9aba-892645206b97-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.696715 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qs8lq\" (UniqueName: \"kubernetes.io/projected/9c378d90-fab5-4d68-9aba-892645206b97-kube-api-access-qs8lq\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.696732 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9c378d90-fab5-4d68-9aba-892645206b97-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.696754 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9c378d90-fab5-4d68-9aba-892645206b97-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.696777 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9c378d90-fab5-4d68-9aba-892645206b97-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.696795 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.696820 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9c378d90-fab5-4d68-9aba-892645206b97-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.696843 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c378d90-fab5-4d68-9aba-892645206b97-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.797323 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9c378d90-fab5-4d68-9aba-892645206b97-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.797366 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9c378d90-fab5-4d68-9aba-892645206b97-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.797390 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.797415 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/9c378d90-fab5-4d68-9aba-892645206b97-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.797438 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c378d90-fab5-4d68-9aba-892645206b97-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.797474 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9c378d90-fab5-4d68-9aba-892645206b97-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.797496 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9c378d90-fab5-4d68-9aba-892645206b97-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.797521 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9c378d90-fab5-4d68-9aba-892645206b97-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.797559 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9c378d90-fab5-4d68-9aba-892645206b97-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.797577 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qs8lq\" (UniqueName: \"kubernetes.io/projected/9c378d90-fab5-4d68-9aba-892645206b97-kube-api-access-qs8lq\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.797596 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9c378d90-fab5-4d68-9aba-892645206b97-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.797986 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9c378d90-fab5-4d68-9aba-892645206b97-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.798134 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"9c378d90-fab5-4d68-9aba-892645206b97\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.798493 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9c378d90-fab5-4d68-9aba-892645206b97-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.800503 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9c378d90-fab5-4d68-9aba-892645206b97-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.801042 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c378d90-fab5-4d68-9aba-892645206b97-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.801309 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9c378d90-fab5-4d68-9aba-892645206b97-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.812349 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9c378d90-fab5-4d68-9aba-892645206b97-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.813889 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9c378d90-fab5-4d68-9aba-892645206b97-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.814384 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9c378d90-fab5-4d68-9aba-892645206b97-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.815101 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9c378d90-fab5-4d68-9aba-892645206b97-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.834265 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qs8lq\" (UniqueName: \"kubernetes.io/projected/9c378d90-fab5-4d68-9aba-892645206b97-kube-api-access-qs8lq\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.846658 4650 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c378d90-fab5-4d68-9aba-892645206b97\") " pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.968447 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:39:37 crc kubenswrapper[4650]: I0201 07:39:37.993446 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 01 07:39:38 crc kubenswrapper[4650]: W0201 07:39:38.020957 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod722b2919_c0d6_4596_82cc_5ae2b5951263.slice/crio-3585b87d2de0eab3483403c322d9d0f5cfa026d6e1eebb103ee14c022c0dd0f4 WatchSource:0}: Error finding container 3585b87d2de0eab3483403c322d9d0f5cfa026d6e1eebb103ee14c022c0dd0f4: Status 404 returned error can't find the container with id 3585b87d2de0eab3483403c322d9d0f5cfa026d6e1eebb103ee14c022c0dd0f4 Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.403018 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"722b2919-c0d6-4596-82cc-5ae2b5951263","Type":"ContainerStarted","Data":"3585b87d2de0eab3483403c322d9d0f5cfa026d6e1eebb103ee14c022c0dd0f4"} Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.411307 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" event={"ID":"b2bab93e-0469-4ee5-841a-bca36667a835","Type":"ContainerStarted","Data":"18495a1a19babf48926fc04c73e2a3abe34e4f1eaedc574b9d7eba08c1b603a4"} Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.460656 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 01 07:39:38 crc kubenswrapper[4650]: W0201 07:39:38.519601 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9c378d90_fab5_4d68_9aba_892645206b97.slice/crio-dc6c9ca8b6ab5907b53a7d89e9ad82b97f60a9cc9fa02a3aed2eb7773b2e6829 WatchSource:0}: Error finding container dc6c9ca8b6ab5907b53a7d89e9ad82b97f60a9cc9fa02a3aed2eb7773b2e6829: Status 404 returned error can't find the container with id dc6c9ca8b6ab5907b53a7d89e9ad82b97f60a9cc9fa02a3aed2eb7773b2e6829 Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.618362 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.619406 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.623213 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.624700 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.628934 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.629773 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-48cj6" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.645762 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.648262 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.733533 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.733782 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.733945 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-config-data-default\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.733965 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.734016 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s859p\" (UniqueName: \"kubernetes.io/projected/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-kube-api-access-s859p\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.734073 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.734088 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.734106 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-kolla-config\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.837343 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.837403 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-config-data-default\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.837421 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.837458 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s859p\" (UniqueName: \"kubernetes.io/projected/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-kube-api-access-s859p\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.837486 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.837501 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.837519 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-kolla-config\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.837541 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-galera-tls-certs\") pod \"openstack-galera-0\" (UID: 
\"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.838741 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.838977 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-kolla-config\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.839352 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.839649 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.839825 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-config-data-default\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.842980 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.852008 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.867156 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s859p\" (UniqueName: \"kubernetes.io/projected/ce8e76c5-52b4-46aa-b009-181f08e5cdc7-kube-api-access-s859p\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.891690 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"ce8e76c5-52b4-46aa-b009-181f08e5cdc7\") " pod="openstack/openstack-galera-0" Feb 01 07:39:38 crc kubenswrapper[4650]: I0201 07:39:38.940190 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Feb 01 07:39:39 crc kubenswrapper[4650]: I0201 07:39:39.450860 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9c378d90-fab5-4d68-9aba-892645206b97","Type":"ContainerStarted","Data":"dc6c9ca8b6ab5907b53a7d89e9ad82b97f60a9cc9fa02a3aed2eb7773b2e6829"} Feb 01 07:39:39 crc kubenswrapper[4650]: I0201 07:39:39.578902 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Feb 01 07:39:39 crc kubenswrapper[4650]: W0201 07:39:39.677060 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce8e76c5_52b4_46aa_b009_181f08e5cdc7.slice/crio-2fa2df9305d7c6c2dbcfd443abb1f82834d540933440ed2e758f58a17e4980c5 WatchSource:0}: Error finding container 2fa2df9305d7c6c2dbcfd443abb1f82834d540933440ed2e758f58a17e4980c5: Status 404 returned error can't find the container with id 2fa2df9305d7c6c2dbcfd443abb1f82834d540933440ed2e758f58a17e4980c5 Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.086906 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.090894 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.093177 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-l8546" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.093633 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.093817 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.096441 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.107723 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.168699 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4gfk\" (UniqueName: \"kubernetes.io/projected/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-kube-api-access-s4gfk\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.168758 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.168796 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 
07:39:40.168822 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.168845 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.168867 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.168897 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.168923 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.256326 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.257149 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.266839 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.266869 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.267102 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-chfx9" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.269920 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.269979 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.270011 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.270040 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.270062 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.270096 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.270119 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.270149 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4gfk\" (UniqueName: \"kubernetes.io/projected/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-kube-api-access-s4gfk\") pod \"openstack-cell1-galera-0\" (UID: 
\"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.270656 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.270903 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.274212 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.275962 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.277785 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.291449 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.293122 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.294643 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.306183 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.349972 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4gfk\" (UniqueName: \"kubernetes.io/projected/a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac-kube-api-access-s4gfk\") pod \"openstack-cell1-galera-0\" (UID: 
\"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac\") " pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.390428 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pw7w\" (UniqueName: \"kubernetes.io/projected/ca494288-bfa9-474d-8805-21226c9d7cbd-kube-api-access-9pw7w\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.390548 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca494288-bfa9-474d-8805-21226c9d7cbd-combined-ca-bundle\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.390584 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca494288-bfa9-474d-8805-21226c9d7cbd-config-data\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.390635 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca494288-bfa9-474d-8805-21226c9d7cbd-memcached-tls-certs\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.390706 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ca494288-bfa9-474d-8805-21226c9d7cbd-kolla-config\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.443426 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.496175 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca494288-bfa9-474d-8805-21226c9d7cbd-config-data\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.496225 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca494288-bfa9-474d-8805-21226c9d7cbd-memcached-tls-certs\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.496282 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ca494288-bfa9-474d-8805-21226c9d7cbd-kolla-config\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.496320 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pw7w\" (UniqueName: \"kubernetes.io/projected/ca494288-bfa9-474d-8805-21226c9d7cbd-kube-api-access-9pw7w\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.496370 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca494288-bfa9-474d-8805-21226c9d7cbd-combined-ca-bundle\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.496961 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca494288-bfa9-474d-8805-21226c9d7cbd-config-data\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.499974 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ca494288-bfa9-474d-8805-21226c9d7cbd-kolla-config\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.516831 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca494288-bfa9-474d-8805-21226c9d7cbd-memcached-tls-certs\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.517813 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca494288-bfa9-474d-8805-21226c9d7cbd-combined-ca-bundle\") pod \"memcached-0\" (UID: \"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.520238 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pw7w\" (UniqueName: \"kubernetes.io/projected/ca494288-bfa9-474d-8805-21226c9d7cbd-kube-api-access-9pw7w\") pod \"memcached-0\" (UID: 
\"ca494288-bfa9-474d-8805-21226c9d7cbd\") " pod="openstack/memcached-0" Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.539225 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ce8e76c5-52b4-46aa-b009-181f08e5cdc7","Type":"ContainerStarted","Data":"2fa2df9305d7c6c2dbcfd443abb1f82834d540933440ed2e758f58a17e4980c5"} Feb 01 07:39:40 crc kubenswrapper[4650]: I0201 07:39:40.678087 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Feb 01 07:39:41 crc kubenswrapper[4650]: I0201 07:39:41.085253 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 01 07:39:41 crc kubenswrapper[4650]: I0201 07:39:41.204832 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Feb 01 07:39:41 crc kubenswrapper[4650]: W0201 07:39:41.225693 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca494288_bfa9_474d_8805_21226c9d7cbd.slice/crio-caa281d51c93fb39b1b26c62a48f63c14449a848ce78006be5b922063d9aa0a3 WatchSource:0}: Error finding container caa281d51c93fb39b1b26c62a48f63c14449a848ce78006be5b922063d9aa0a3: Status 404 returned error can't find the container with id caa281d51c93fb39b1b26c62a48f63c14449a848ce78006be5b922063d9aa0a3 Feb 01 07:39:41 crc kubenswrapper[4650]: I0201 07:39:41.586012 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"ca494288-bfa9-474d-8805-21226c9d7cbd","Type":"ContainerStarted","Data":"caa281d51c93fb39b1b26c62a48f63c14449a848ce78006be5b922063d9aa0a3"} Feb 01 07:39:41 crc kubenswrapper[4650]: I0201 07:39:41.587914 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac","Type":"ContainerStarted","Data":"5e2d627db71e091d993d473dfe8805f2eea68a47a9b009905c5deddacbabaad3"} Feb 01 07:39:42 crc kubenswrapper[4650]: I0201 07:39:42.236568 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Feb 01 07:39:42 crc kubenswrapper[4650]: I0201 07:39:42.239581 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 01 07:39:42 crc kubenswrapper[4650]: I0201 07:39:42.244084 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-ds6gh" Feb 01 07:39:42 crc kubenswrapper[4650]: I0201 07:39:42.244628 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 01 07:39:42 crc kubenswrapper[4650]: I0201 07:39:42.333012 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kjqv\" (UniqueName: \"kubernetes.io/projected/22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf-kube-api-access-8kjqv\") pod \"kube-state-metrics-0\" (UID: \"22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf\") " pod="openstack/kube-state-metrics-0" Feb 01 07:39:42 crc kubenswrapper[4650]: I0201 07:39:42.435577 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kjqv\" (UniqueName: \"kubernetes.io/projected/22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf-kube-api-access-8kjqv\") pod \"kube-state-metrics-0\" (UID: \"22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf\") " pod="openstack/kube-state-metrics-0" Feb 01 07:39:42 crc kubenswrapper[4650]: I0201 07:39:42.459934 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kjqv\" (UniqueName: \"kubernetes.io/projected/22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf-kube-api-access-8kjqv\") pod \"kube-state-metrics-0\" (UID: \"22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf\") " pod="openstack/kube-state-metrics-0" Feb 01 07:39:42 crc kubenswrapper[4650]: I0201 07:39:42.573363 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 01 07:39:43 crc kubenswrapper[4650]: I0201 07:39:43.584768 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 01 07:39:43 crc kubenswrapper[4650]: I0201 07:39:43.722663 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf","Type":"ContainerStarted","Data":"91ebfdceac486dfa12d0b8bf5ab5981a94bfa0f3ffa97147bba710599287763a"} Feb 01 07:39:45 crc kubenswrapper[4650]: I0201 07:39:45.981272 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-df4vg"] Feb 01 07:39:45 crc kubenswrapper[4650]: I0201 07:39:45.982828 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-9xcg8"] Feb 01 07:39:45 crc kubenswrapper[4650]: I0201 07:39:45.985276 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:45 crc kubenswrapper[4650]: I0201 07:39:45.985279 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-df4vg" Feb 01 07:39:45 crc kubenswrapper[4650]: I0201 07:39:45.986650 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-df4vg"] Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.005297 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-nd8pf" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.005648 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.006256 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.018382 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-9xcg8"] Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.137982 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-var-run\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.138104 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-var-lib\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.138140 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwf7h\" (UniqueName: \"kubernetes.io/projected/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-kube-api-access-mwf7h\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.138166 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-combined-ca-bundle\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.138187 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-scripts\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.138205 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn6cp\" (UniqueName: \"kubernetes.io/projected/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-kube-api-access-sn6cp\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.138223 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-etc-ovs\") pod 
\"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.138272 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-var-run-ovn\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.138288 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-var-run\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.138309 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-var-log-ovn\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.138543 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-var-log\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.138775 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-ovn-controller-tls-certs\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.138806 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-scripts\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.240738 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-var-log\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.241129 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-ovn-controller-tls-certs\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.241176 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-scripts\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " 
pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.241199 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-var-run\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.241219 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-var-lib\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.241243 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwf7h\" (UniqueName: \"kubernetes.io/projected/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-kube-api-access-mwf7h\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.241265 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-combined-ca-bundle\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.241313 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-var-log\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.241325 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-scripts\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.241387 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn6cp\" (UniqueName: \"kubernetes.io/projected/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-kube-api-access-sn6cp\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.241415 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-etc-ovs\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.241829 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-var-run\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.243342 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-var-run-ovn\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.243389 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-var-run\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.243423 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-var-log-ovn\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.243568 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-etc-ovs\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.243661 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-var-log-ovn\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.243766 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-var-run-ovn\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.243813 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-var-run\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.244147 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-var-lib\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.245189 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-scripts\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.245831 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-scripts\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.286822 4650 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-ovn-controller-tls-certs\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.289462 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwf7h\" (UniqueName: \"kubernetes.io/projected/0a714fe2-3b81-4e99-8596-1b7ccd8d913c-kube-api-access-mwf7h\") pod \"ovn-controller-ovs-9xcg8\" (UID: \"0a714fe2-3b81-4e99-8596-1b7ccd8d913c\") " pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.301751 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-combined-ca-bundle\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.302353 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn6cp\" (UniqueName: \"kubernetes.io/projected/0eea2c6a-8650-4a55-aab9-0b27b8e829b4-kube-api-access-sn6cp\") pod \"ovn-controller-df4vg\" (UID: \"0eea2c6a-8650-4a55-aab9-0b27b8e829b4\") " pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.367253 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.386149 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-df4vg" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.457337 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.462318 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.465958 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-mrt5j" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.465964 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.466058 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.466203 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.467577 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.472497 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.649632 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48tzf\" (UniqueName: \"kubernetes.io/projected/d74f3701-5d40-45dd-8a2d-041cdb5a8720-kube-api-access-48tzf\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.649803 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d74f3701-5d40-45dd-8a2d-041cdb5a8720-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.649890 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d74f3701-5d40-45dd-8a2d-041cdb5a8720-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.650099 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d74f3701-5d40-45dd-8a2d-041cdb5a8720-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.650138 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d74f3701-5d40-45dd-8a2d-041cdb5a8720-config\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.650287 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.650355 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" 
(UniqueName: \"kubernetes.io/empty-dir/d74f3701-5d40-45dd-8a2d-041cdb5a8720-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.650372 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d74f3701-5d40-45dd-8a2d-041cdb5a8720-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.751228 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d74f3701-5d40-45dd-8a2d-041cdb5a8720-config\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.751289 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.751312 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d74f3701-5d40-45dd-8a2d-041cdb5a8720-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.751329 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d74f3701-5d40-45dd-8a2d-041cdb5a8720-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.751348 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48tzf\" (UniqueName: \"kubernetes.io/projected/d74f3701-5d40-45dd-8a2d-041cdb5a8720-kube-api-access-48tzf\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.751390 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d74f3701-5d40-45dd-8a2d-041cdb5a8720-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.751410 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d74f3701-5d40-45dd-8a2d-041cdb5a8720-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.751463 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d74f3701-5d40-45dd-8a2d-041cdb5a8720-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 
07:39:46.752725 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d74f3701-5d40-45dd-8a2d-041cdb5a8720-config\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.753109 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.754186 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d74f3701-5d40-45dd-8a2d-041cdb5a8720-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.757290 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d74f3701-5d40-45dd-8a2d-041cdb5a8720-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.783766 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d74f3701-5d40-45dd-8a2d-041cdb5a8720-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.787404 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d74f3701-5d40-45dd-8a2d-041cdb5a8720-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.790954 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d74f3701-5d40-45dd-8a2d-041cdb5a8720-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.800408 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:46 crc kubenswrapper[4650]: I0201 07:39:46.824765 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48tzf\" (UniqueName: \"kubernetes.io/projected/d74f3701-5d40-45dd-8a2d-041cdb5a8720-kube-api-access-48tzf\") pod \"ovsdbserver-nb-0\" (UID: \"d74f3701-5d40-45dd-8a2d-041cdb5a8720\") " pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:47 crc kubenswrapper[4650]: I0201 07:39:47.087727 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Feb 01 07:39:48 crc kubenswrapper[4650]: I0201 07:39:48.995014 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.000310 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.013766 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.013996 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.014215 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.014372 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-8shtm" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.025548 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.110125 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.110179 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.110211 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.110232 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.110250 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-config\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.110269 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 
crc kubenswrapper[4650]: I0201 07:39:49.110310 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84zgk\" (UniqueName: \"kubernetes.io/projected/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-kube-api-access-84zgk\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.110329 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.212588 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.212671 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.212719 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.212750 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.212778 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-config\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.212806 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.212864 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84zgk\" (UniqueName: \"kubernetes.io/projected/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-kube-api-access-84zgk\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.212895 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod 
\"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.213616 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.214582 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.216543 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.217935 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-config\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.221399 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.221431 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.232087 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.239171 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.239854 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84zgk\" (UniqueName: \"kubernetes.io/projected/aa3b9991-cc4c-437b-aa76-a0bb01050b1d-kube-api-access-84zgk\") pod \"ovsdbserver-sb-0\" (UID: \"aa3b9991-cc4c-437b-aa76-a0bb01050b1d\") " pod="openstack/ovsdbserver-sb-0" Feb 01 07:39:49 crc kubenswrapper[4650]: I0201 07:39:49.344117 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.185705 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-x2hwt"] Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.189512 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.193904 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x2hwt"] Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.333610 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b5003e-5d03-4798-9822-873d2ea641b4-catalog-content\") pod \"redhat-marketplace-x2hwt\" (UID: \"e6b5003e-5d03-4798-9822-873d2ea641b4\") " pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.333992 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b5003e-5d03-4798-9822-873d2ea641b4-utilities\") pod \"redhat-marketplace-x2hwt\" (UID: \"e6b5003e-5d03-4798-9822-873d2ea641b4\") " pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.334132 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndp72\" (UniqueName: \"kubernetes.io/projected/e6b5003e-5d03-4798-9822-873d2ea641b4-kube-api-access-ndp72\") pod \"redhat-marketplace-x2hwt\" (UID: \"e6b5003e-5d03-4798-9822-873d2ea641b4\") " pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.436768 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b5003e-5d03-4798-9822-873d2ea641b4-catalog-content\") pod \"redhat-marketplace-x2hwt\" (UID: \"e6b5003e-5d03-4798-9822-873d2ea641b4\") " pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.436823 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b5003e-5d03-4798-9822-873d2ea641b4-utilities\") pod \"redhat-marketplace-x2hwt\" (UID: \"e6b5003e-5d03-4798-9822-873d2ea641b4\") " pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.436871 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndp72\" (UniqueName: \"kubernetes.io/projected/e6b5003e-5d03-4798-9822-873d2ea641b4-kube-api-access-ndp72\") pod \"redhat-marketplace-x2hwt\" (UID: \"e6b5003e-5d03-4798-9822-873d2ea641b4\") " pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.437343 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b5003e-5d03-4798-9822-873d2ea641b4-catalog-content\") pod \"redhat-marketplace-x2hwt\" (UID: \"e6b5003e-5d03-4798-9822-873d2ea641b4\") " pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.437368 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/e6b5003e-5d03-4798-9822-873d2ea641b4-utilities\") pod \"redhat-marketplace-x2hwt\" (UID: \"e6b5003e-5d03-4798-9822-873d2ea641b4\") " pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.451675 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndp72\" (UniqueName: \"kubernetes.io/projected/e6b5003e-5d03-4798-9822-873d2ea641b4-kube-api-access-ndp72\") pod \"redhat-marketplace-x2hwt\" (UID: \"e6b5003e-5d03-4798-9822-873d2ea641b4\") " pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.553300 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:04 crc kubenswrapper[4650]: I0201 07:40:04.633557 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-df4vg"] Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.183890 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.184133 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qnhp4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
dnsmasq-dns-666b6646f7-cj2gg_openstack(81f3af79-80fc-4bd4-a429-82d9f42ef456): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.185660 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" podUID="81f3af79-80fc-4bd4-a429-82d9f42ef456" Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.304788 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.305256 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4747q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-4ldzd_openstack(748cc059-b05f-4d89-a669-b295e49a39bd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.306475 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" 
podUID="748cc059-b05f-4d89-a669-b295e49a39bd" Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.341760 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.341910 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rp9td,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-bhfwv_openstack(b2bab93e-0469-4ee5-841a-bca36667a835): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.343261 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" podUID="b2bab93e-0469-4ee5-841a-bca36667a835" Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.346177 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.346291 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-89ttl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-4bmdn_openstack(061abe73-8375-4525-914a-b5e06db7e5db): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.351198 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-4bmdn" podUID="061abe73-8375-4525-914a-b5e06db7e5db" Feb 01 07:40:05 crc kubenswrapper[4650]: I0201 07:40:05.892000 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-df4vg" event={"ID":"0eea2c6a-8650-4a55-aab9-0b27b8e829b4","Type":"ContainerStarted","Data":"adafbeb26a05cc751d98be637e7aa9667c753216786e3bddd1bd3878c33cfcb8"} Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.895050 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" podUID="b2bab93e-0469-4ee5-841a-bca36667a835" Feb 01 07:40:05 crc kubenswrapper[4650]: E0201 07:40:05.895212 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" 
podUID="81f3af79-80fc-4bd4-a429-82d9f42ef456" Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.198949 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-9xcg8"] Feb 01 07:40:06 crc kubenswrapper[4650]: W0201 07:40:06.227348 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a714fe2_3b81_4e99_8596_1b7ccd8d913c.slice/crio-62bb72ef48bbfa639000c1410994910aad8e841884b10507ac6b81abec54de5d WatchSource:0}: Error finding container 62bb72ef48bbfa639000c1410994910aad8e841884b10507ac6b81abec54de5d: Status 404 returned error can't find the container with id 62bb72ef48bbfa639000c1410994910aad8e841884b10507ac6b81abec54de5d Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.318656 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4bmdn" Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.471759 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/061abe73-8375-4525-914a-b5e06db7e5db-config\") pod \"061abe73-8375-4525-914a-b5e06db7e5db\" (UID: \"061abe73-8375-4525-914a-b5e06db7e5db\") " Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.471876 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89ttl\" (UniqueName: \"kubernetes.io/projected/061abe73-8375-4525-914a-b5e06db7e5db-kube-api-access-89ttl\") pod \"061abe73-8375-4525-914a-b5e06db7e5db\" (UID: \"061abe73-8375-4525-914a-b5e06db7e5db\") " Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.472410 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/061abe73-8375-4525-914a-b5e06db7e5db-config" (OuterVolumeSpecName: "config") pod "061abe73-8375-4525-914a-b5e06db7e5db" (UID: "061abe73-8375-4525-914a-b5e06db7e5db"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.472562 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/061abe73-8375-4525-914a-b5e06db7e5db-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.493144 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/061abe73-8375-4525-914a-b5e06db7e5db-kube-api-access-89ttl" (OuterVolumeSpecName: "kube-api-access-89ttl") pod "061abe73-8375-4525-914a-b5e06db7e5db" (UID: "061abe73-8375-4525-914a-b5e06db7e5db"). InnerVolumeSpecName "kube-api-access-89ttl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.574186 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89ttl\" (UniqueName: \"kubernetes.io/projected/061abe73-8375-4525-914a-b5e06db7e5db-kube-api-access-89ttl\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.575368 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.663308 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x2hwt"] Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.674867 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/748cc059-b05f-4d89-a669-b295e49a39bd-dns-svc\") pod \"748cc059-b05f-4d89-a669-b295e49a39bd\" (UID: \"748cc059-b05f-4d89-a669-b295e49a39bd\") " Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.674956 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4747q\" (UniqueName: \"kubernetes.io/projected/748cc059-b05f-4d89-a669-b295e49a39bd-kube-api-access-4747q\") pod \"748cc059-b05f-4d89-a669-b295e49a39bd\" (UID: \"748cc059-b05f-4d89-a669-b295e49a39bd\") " Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.674994 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/748cc059-b05f-4d89-a669-b295e49a39bd-config\") pod \"748cc059-b05f-4d89-a669-b295e49a39bd\" (UID: \"748cc059-b05f-4d89-a669-b295e49a39bd\") " Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.675852 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/748cc059-b05f-4d89-a669-b295e49a39bd-config" (OuterVolumeSpecName: "config") pod "748cc059-b05f-4d89-a669-b295e49a39bd" (UID: "748cc059-b05f-4d89-a669-b295e49a39bd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.675927 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/748cc059-b05f-4d89-a669-b295e49a39bd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "748cc059-b05f-4d89-a669-b295e49a39bd" (UID: "748cc059-b05f-4d89-a669-b295e49a39bd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.678241 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/748cc059-b05f-4d89-a669-b295e49a39bd-kube-api-access-4747q" (OuterVolumeSpecName: "kube-api-access-4747q") pod "748cc059-b05f-4d89-a669-b295e49a39bd" (UID: "748cc059-b05f-4d89-a669-b295e49a39bd"). InnerVolumeSpecName "kube-api-access-4747q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:06 crc kubenswrapper[4650]: W0201 07:40:06.750325 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode6b5003e_5d03_4798_9822_873d2ea641b4.slice/crio-d8f7365a32a2eeef93a1c7a07baefd4e141c01e18efe90adbf8928e5e6ffc0cb WatchSource:0}: Error finding container d8f7365a32a2eeef93a1c7a07baefd4e141c01e18efe90adbf8928e5e6ffc0cb: Status 404 returned error can't find the container with id d8f7365a32a2eeef93a1c7a07baefd4e141c01e18efe90adbf8928e5e6ffc0cb Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.777828 4650 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/748cc059-b05f-4d89-a669-b295e49a39bd-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.777869 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4747q\" (UniqueName: \"kubernetes.io/projected/748cc059-b05f-4d89-a669-b295e49a39bd-kube-api-access-4747q\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.777885 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/748cc059-b05f-4d89-a669-b295e49a39bd-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.791127 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.899959 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" event={"ID":"748cc059-b05f-4d89-a669-b295e49a39bd","Type":"ContainerDied","Data":"ee1abde9a68e76325ef57f9308ac55282a1395b6f2b8487c768e7bf5fe5a2abb"} Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.899984 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-4ldzd" Feb 01 07:40:06 crc kubenswrapper[4650]: W0201 07:40:06.900725 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa3b9991_cc4c_437b_aa76_a0bb01050b1d.slice/crio-850138d0f79d5af574387c67a1bbb46040250e798a22a1a961282b7980f5ca5f WatchSource:0}: Error finding container 850138d0f79d5af574387c67a1bbb46040250e798a22a1a961282b7980f5ca5f: Status 404 returned error can't find the container with id 850138d0f79d5af574387c67a1bbb46040250e798a22a1a961282b7980f5ca5f Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.901555 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x2hwt" event={"ID":"e6b5003e-5d03-4798-9822-873d2ea641b4","Type":"ContainerStarted","Data":"d8f7365a32a2eeef93a1c7a07baefd4e141c01e18efe90adbf8928e5e6ffc0cb"} Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.904397 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-4bmdn" event={"ID":"061abe73-8375-4525-914a-b5e06db7e5db","Type":"ContainerDied","Data":"0aea3ae370c0cc28f4f7e80b22b5132285ce7fcf6a8b16d64b2f2cfebff7991e"} Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.904470 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4bmdn" Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.906257 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ce8e76c5-52b4-46aa-b009-181f08e5cdc7","Type":"ContainerStarted","Data":"685d7447152474b35728acaebfe6d0a28a96da5fb94b3ec3071d77f9dc6f5dd1"} Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.908058 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-9xcg8" event={"ID":"0a714fe2-3b81-4e99-8596-1b7ccd8d913c","Type":"ContainerStarted","Data":"62bb72ef48bbfa639000c1410994910aad8e841884b10507ac6b81abec54de5d"} Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.985802 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-4ldzd"] Feb 01 07:40:06 crc kubenswrapper[4650]: I0201 07:40:06.998478 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-4ldzd"] Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.016087 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bmdn"] Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.026940 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bmdn"] Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.062969 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Feb 01 07:40:07 crc kubenswrapper[4650]: W0201 07:40:07.502519 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd74f3701_5d40_45dd_8a2d_041cdb5a8720.slice/crio-d11383a1a40bd7960cf6b92d9d5319c494c2045a8227e5f531d13c2b7bc91e70 WatchSource:0}: Error finding container d11383a1a40bd7960cf6b92d9d5319c494c2045a8227e5f531d13c2b7bc91e70: Status 404 returned error can't find the container with id d11383a1a40bd7960cf6b92d9d5319c494c2045a8227e5f531d13c2b7bc91e70 Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.920109 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"722b2919-c0d6-4596-82cc-5ae2b5951263","Type":"ContainerStarted","Data":"32437ee89767575c4ab78143becb2f56aa0392cfd1b4b00d44cd1c32a22178c5"} Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.923713 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"ca494288-bfa9-474d-8805-21226c9d7cbd","Type":"ContainerStarted","Data":"21602e5a95f6dcc84c4a00fbf3abd8c2e545345909227e531681c210f6cc9247"} Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.923833 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.925667 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"aa3b9991-cc4c-437b-aa76-a0bb01050b1d","Type":"ContainerStarted","Data":"850138d0f79d5af574387c67a1bbb46040250e798a22a1a961282b7980f5ca5f"} Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.927891 4650 generic.go:334] "Generic (PLEG): container finished" podID="e6b5003e-5d03-4798-9822-873d2ea641b4" containerID="546f4b93baf8ffaf2066ac43b0f3544f975ad82f243963aeb7c2786d3d51c788" exitCode=0 Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.927936 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x2hwt" 
event={"ID":"e6b5003e-5d03-4798-9822-873d2ea641b4","Type":"ContainerDied","Data":"546f4b93baf8ffaf2066ac43b0f3544f975ad82f243963aeb7c2786d3d51c788"} Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.930150 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d74f3701-5d40-45dd-8a2d-041cdb5a8720","Type":"ContainerStarted","Data":"d11383a1a40bd7960cf6b92d9d5319c494c2045a8227e5f531d13c2b7bc91e70"} Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.931885 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac","Type":"ContainerStarted","Data":"1d308549f7429784f4a456228c9f7c5f40c892da9351bfa4038f8818a8714dd2"} Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.938914 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf","Type":"ContainerStarted","Data":"7fb4f2f0723f3f8a4ab1fdbcce31f0ee44cb470a5e66f8afb6bdc902d42cfa27"} Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.938958 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.986417 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="061abe73-8375-4525-914a-b5e06db7e5db" path="/var/lib/kubelet/pods/061abe73-8375-4525-914a-b5e06db7e5db/volumes" Feb 01 07:40:07 crc kubenswrapper[4650]: I0201 07:40:07.986869 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="748cc059-b05f-4d89-a669-b295e49a39bd" path="/var/lib/kubelet/pods/748cc059-b05f-4d89-a669-b295e49a39bd/volumes" Feb 01 07:40:08 crc kubenswrapper[4650]: I0201 07:40:08.007268 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.051722647 podStartE2EDuration="26.007249582s" podCreationTimestamp="2026-02-01 07:39:42 +0000 UTC" firstStartedPulling="2026-02-01 07:39:43.611200945 +0000 UTC m=+982.334299190" lastFinishedPulling="2026-02-01 07:40:07.56672788 +0000 UTC m=+1006.289826125" observedRunningTime="2026-02-01 07:40:08.001674345 +0000 UTC m=+1006.724772590" watchObservedRunningTime="2026-02-01 07:40:08.007249582 +0000 UTC m=+1006.730347827" Feb 01 07:40:08 crc kubenswrapper[4650]: I0201 07:40:08.027357 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=3.972211812 podStartE2EDuration="28.027339388s" podCreationTimestamp="2026-02-01 07:39:40 +0000 UTC" firstStartedPulling="2026-02-01 07:39:41.25552335 +0000 UTC m=+979.978621595" lastFinishedPulling="2026-02-01 07:40:05.310650926 +0000 UTC m=+1004.033749171" observedRunningTime="2026-02-01 07:40:08.026452315 +0000 UTC m=+1006.749550560" watchObservedRunningTime="2026-02-01 07:40:08.027339388 +0000 UTC m=+1006.750437633" Feb 01 07:40:08 crc kubenswrapper[4650]: I0201 07:40:08.947877 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9c378d90-fab5-4d68-9aba-892645206b97","Type":"ContainerStarted","Data":"ff627a2b40a0ccff03027386f28859d19e57b613c37de01159985101a0578d2b"} Feb 01 07:40:10 crc kubenswrapper[4650]: I0201 07:40:10.962711 4650 generic.go:334] "Generic (PLEG): container finished" podID="0a714fe2-3b81-4e99-8596-1b7ccd8d913c" containerID="559331ae29e1531bdcd37c299ca5d165661519a418bdb32b4ba67c16f6f80060" exitCode=0 Feb 01 07:40:10 crc 
kubenswrapper[4650]: I0201 07:40:10.962864 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-9xcg8" event={"ID":"0a714fe2-3b81-4e99-8596-1b7ccd8d913c","Type":"ContainerDied","Data":"559331ae29e1531bdcd37c299ca5d165661519a418bdb32b4ba67c16f6f80060"} Feb 01 07:40:10 crc kubenswrapper[4650]: I0201 07:40:10.971158 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"aa3b9991-cc4c-437b-aa76-a0bb01050b1d","Type":"ContainerStarted","Data":"96960e2edeb0d398d23ccce53f9eacaa084a2ba38074193c2a82e20efec85b3e"} Feb 01 07:40:10 crc kubenswrapper[4650]: I0201 07:40:10.974060 4650 generic.go:334] "Generic (PLEG): container finished" podID="e6b5003e-5d03-4798-9822-873d2ea641b4" containerID="c7d84b1c81c4ee1a49646d00c8517a859702736664531698979d6c033d9207d3" exitCode=0 Feb 01 07:40:10 crc kubenswrapper[4650]: I0201 07:40:10.974127 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x2hwt" event={"ID":"e6b5003e-5d03-4798-9822-873d2ea641b4","Type":"ContainerDied","Data":"c7d84b1c81c4ee1a49646d00c8517a859702736664531698979d6c033d9207d3"} Feb 01 07:40:10 crc kubenswrapper[4650]: I0201 07:40:10.979578 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-df4vg" event={"ID":"0eea2c6a-8650-4a55-aab9-0b27b8e829b4","Type":"ContainerStarted","Data":"d0c63062c37c660638981be04e3d83d8838f2ececafa85fb45db1ab5e0f37090"} Feb 01 07:40:10 crc kubenswrapper[4650]: I0201 07:40:10.979634 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-df4vg" Feb 01 07:40:10 crc kubenswrapper[4650]: I0201 07:40:10.981260 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d74f3701-5d40-45dd-8a2d-041cdb5a8720","Type":"ContainerStarted","Data":"92956962b114e88f523e95414f631b79c2c3568dc6e7df13026a46c21a9b2339"} Feb 01 07:40:10 crc kubenswrapper[4650]: I0201 07:40:10.983433 4650 generic.go:334] "Generic (PLEG): container finished" podID="ce8e76c5-52b4-46aa-b009-181f08e5cdc7" containerID="685d7447152474b35728acaebfe6d0a28a96da5fb94b3ec3071d77f9dc6f5dd1" exitCode=0 Feb 01 07:40:10 crc kubenswrapper[4650]: I0201 07:40:10.983459 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ce8e76c5-52b4-46aa-b009-181f08e5cdc7","Type":"ContainerDied","Data":"685d7447152474b35728acaebfe6d0a28a96da5fb94b3ec3071d77f9dc6f5dd1"} Feb 01 07:40:12 crc kubenswrapper[4650]: I0201 07:40:12.006918 4650 generic.go:334] "Generic (PLEG): container finished" podID="a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac" containerID="1d308549f7429784f4a456228c9f7c5f40c892da9351bfa4038f8818a8714dd2" exitCode=0 Feb 01 07:40:12 crc kubenswrapper[4650]: I0201 07:40:12.007019 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac","Type":"ContainerDied","Data":"1d308549f7429784f4a456228c9f7c5f40c892da9351bfa4038f8818a8714dd2"} Feb 01 07:40:12 crc kubenswrapper[4650]: I0201 07:40:12.009971 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ce8e76c5-52b4-46aa-b009-181f08e5cdc7","Type":"ContainerStarted","Data":"74908d52ea7e4b187f528caa5ed5dbbe3dc5cc9912ff8d326284b04a9fc71579"} Feb 01 07:40:12 crc kubenswrapper[4650]: I0201 07:40:12.013074 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-9xcg8" 
event={"ID":"0a714fe2-3b81-4e99-8596-1b7ccd8d913c","Type":"ContainerStarted","Data":"632cc9fff8d32b58c2726bde7057fda2ddd8880d275d3c6928d6280a18be7ed7"} Feb 01 07:40:12 crc kubenswrapper[4650]: I0201 07:40:12.013107 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-9xcg8" event={"ID":"0a714fe2-3b81-4e99-8596-1b7ccd8d913c","Type":"ContainerStarted","Data":"a00873a1a630b6c148364a35159aef2c77d762b2ceee24886fd25799035c5ce8"} Feb 01 07:40:12 crc kubenswrapper[4650]: I0201 07:40:12.013377 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:40:12 crc kubenswrapper[4650]: I0201 07:40:12.013977 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:40:12 crc kubenswrapper[4650]: I0201 07:40:12.020041 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x2hwt" event={"ID":"e6b5003e-5d03-4798-9822-873d2ea641b4","Type":"ContainerStarted","Data":"2184a6dbe35bbf45c2a0d265ea99628c062b31fe8949c728afc7c9496b3c8f2e"} Feb 01 07:40:12 crc kubenswrapper[4650]: I0201 07:40:12.023795 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-df4vg" podStartSLOduration=22.205330232 podStartE2EDuration="27.023778462s" podCreationTimestamp="2026-02-01 07:39:45 +0000 UTC" firstStartedPulling="2026-02-01 07:40:05.291505244 +0000 UTC m=+1004.014603489" lastFinishedPulling="2026-02-01 07:40:10.109953474 +0000 UTC m=+1008.833051719" observedRunningTime="2026-02-01 07:40:11.156659413 +0000 UTC m=+1009.879757658" watchObservedRunningTime="2026-02-01 07:40:12.023778462 +0000 UTC m=+1010.746876697" Feb 01 07:40:12 crc kubenswrapper[4650]: I0201 07:40:12.084724 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=9.503328079 podStartE2EDuration="35.084697469s" podCreationTimestamp="2026-02-01 07:39:37 +0000 UTC" firstStartedPulling="2026-02-01 07:39:39.692392798 +0000 UTC m=+978.415491043" lastFinishedPulling="2026-02-01 07:40:05.273762168 +0000 UTC m=+1003.996860433" observedRunningTime="2026-02-01 07:40:12.049312931 +0000 UTC m=+1010.772411186" watchObservedRunningTime="2026-02-01 07:40:12.084697469 +0000 UTC m=+1010.807795744" Feb 01 07:40:12 crc kubenswrapper[4650]: I0201 07:40:12.123590 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-9xcg8" podStartSLOduration=23.244179395 podStartE2EDuration="27.123567729s" podCreationTimestamp="2026-02-01 07:39:45 +0000 UTC" firstStartedPulling="2026-02-01 07:40:06.231219497 +0000 UTC m=+1004.954317742" lastFinishedPulling="2026-02-01 07:40:10.110607831 +0000 UTC m=+1008.833706076" observedRunningTime="2026-02-01 07:40:12.068620488 +0000 UTC m=+1010.791718733" watchObservedRunningTime="2026-02-01 07:40:12.123567729 +0000 UTC m=+1010.846665974" Feb 01 07:40:12 crc kubenswrapper[4650]: I0201 07:40:12.129723 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-x2hwt" podStartSLOduration=6.12813085 podStartE2EDuration="8.12971163s" podCreationTimestamp="2026-02-01 07:40:04 +0000 UTC" firstStartedPulling="2026-02-01 07:40:09.396249417 +0000 UTC m=+1008.119347662" lastFinishedPulling="2026-02-01 07:40:11.397830197 +0000 UTC m=+1010.120928442" observedRunningTime="2026-02-01 07:40:12.092291029 +0000 UTC m=+1010.815389274" 
watchObservedRunningTime="2026-02-01 07:40:12.12971163 +0000 UTC m=+1010.852809875" Feb 01 07:40:12 crc kubenswrapper[4650]: I0201 07:40:12.583457 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Feb 01 07:40:13 crc kubenswrapper[4650]: I0201 07:40:13.025846 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d74f3701-5d40-45dd-8a2d-041cdb5a8720","Type":"ContainerStarted","Data":"3cce5e9b0ea9e93356b28cc3e32aac2033e8bf3e88dcc18bb11ab650f7be9b97"} Feb 01 07:40:13 crc kubenswrapper[4650]: I0201 07:40:13.028866 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac","Type":"ContainerStarted","Data":"826463fbe3d3f9c1a71aef9b13980bd5cbd2905de80103f4bfb262a00e8aa24c"} Feb 01 07:40:13 crc kubenswrapper[4650]: I0201 07:40:13.030972 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"aa3b9991-cc4c-437b-aa76-a0bb01050b1d","Type":"ContainerStarted","Data":"062f00c773a2786281ab1c4add0751e8d03183dbe7317203944e17cfca92a150"} Feb 01 07:40:13 crc kubenswrapper[4650]: I0201 07:40:13.049208 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=22.762850523 podStartE2EDuration="28.049190793s" podCreationTimestamp="2026-02-01 07:39:45 +0000 UTC" firstStartedPulling="2026-02-01 07:40:07.543590573 +0000 UTC m=+1006.266688818" lastFinishedPulling="2026-02-01 07:40:12.829930853 +0000 UTC m=+1011.553029088" observedRunningTime="2026-02-01 07:40:13.045733612 +0000 UTC m=+1011.768831877" watchObservedRunningTime="2026-02-01 07:40:13.049190793 +0000 UTC m=+1011.772289058" Feb 01 07:40:13 crc kubenswrapper[4650]: I0201 07:40:13.067441 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=9.10949606 podStartE2EDuration="34.067420021s" podCreationTimestamp="2026-02-01 07:39:39 +0000 UTC" firstStartedPulling="2026-02-01 07:39:41.120836468 +0000 UTC m=+979.843934713" lastFinishedPulling="2026-02-01 07:40:06.078760429 +0000 UTC m=+1004.801858674" observedRunningTime="2026-02-01 07:40:13.063695003 +0000 UTC m=+1011.786793258" watchObservedRunningTime="2026-02-01 07:40:13.067420021 +0000 UTC m=+1011.790518276" Feb 01 07:40:13 crc kubenswrapper[4650]: I0201 07:40:13.084372 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=20.197886677 podStartE2EDuration="26.084355065s" podCreationTimestamp="2026-02-01 07:39:47 +0000 UTC" firstStartedPulling="2026-02-01 07:40:06.93297765 +0000 UTC m=+1005.656075895" lastFinishedPulling="2026-02-01 07:40:12.819446038 +0000 UTC m=+1011.542544283" observedRunningTime="2026-02-01 07:40:13.077484565 +0000 UTC m=+1011.800582830" watchObservedRunningTime="2026-02-01 07:40:13.084355065 +0000 UTC m=+1011.807453310" Feb 01 07:40:13 crc kubenswrapper[4650]: I0201 07:40:13.345180 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Feb 01 07:40:13 crc kubenswrapper[4650]: I0201 07:40:13.384989 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Feb 01 07:40:14 crc kubenswrapper[4650]: I0201 07:40:14.044728 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Feb 01 07:40:14 crc 
kubenswrapper[4650]: I0201 07:40:14.089850 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Feb 01 07:40:14 crc kubenswrapper[4650]: I0201 07:40:14.157488 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Feb 01 07:40:14 crc kubenswrapper[4650]: I0201 07:40:14.554869 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:14 crc kubenswrapper[4650]: I0201 07:40:14.554966 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:14 crc kubenswrapper[4650]: I0201 07:40:14.640120 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.056872 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.125501 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.132985 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.427489 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-cj2gg"] Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.512160 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-9qx7d"] Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.519248 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.523300 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-9qx7d"] Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.527937 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.533661 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-f4tcc"] Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.534968 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.542810 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.570336 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-config\") pod \"dnsmasq-dns-6bc7876d45-f4tcc\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.570548 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e49b7206-629f-498a-b30e-e73c08c0bacf-config\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.570616 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e49b7206-629f-498a-b30e-e73c08c0bacf-ovn-rundir\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.570705 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-f4tcc\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.570784 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e49b7206-629f-498a-b30e-e73c08c0bacf-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.570847 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e49b7206-629f-498a-b30e-e73c08c0bacf-ovs-rundir\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.570924 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e49b7206-629f-498a-b30e-e73c08c0bacf-combined-ca-bundle\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.570996 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-f4tcc\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.571089 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzbmf\" (UniqueName: \"kubernetes.io/projected/0286473d-0769-476d-962a-aa62b2470cad-kube-api-access-mzbmf\") pod \"dnsmasq-dns-6bc7876d45-f4tcc\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.571188 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pmxg\" (UniqueName: \"kubernetes.io/projected/e49b7206-629f-498a-b30e-e73c08c0bacf-kube-api-access-6pmxg\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.580082 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-f4tcc"] Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.676608 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e49b7206-629f-498a-b30e-e73c08c0bacf-config\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.676681 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e49b7206-629f-498a-b30e-e73c08c0bacf-ovn-rundir\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.676722 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-f4tcc\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.676749 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e49b7206-629f-498a-b30e-e73c08c0bacf-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.676772 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e49b7206-629f-498a-b30e-e73c08c0bacf-ovs-rundir\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.676798 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e49b7206-629f-498a-b30e-e73c08c0bacf-combined-ca-bundle\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.676823 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-f4tcc\" (UID: 
\"0286473d-0769-476d-962a-aa62b2470cad\") " pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.676842 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzbmf\" (UniqueName: \"kubernetes.io/projected/0286473d-0769-476d-962a-aa62b2470cad-kube-api-access-mzbmf\") pod \"dnsmasq-dns-6bc7876d45-f4tcc\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.676898 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pmxg\" (UniqueName: \"kubernetes.io/projected/e49b7206-629f-498a-b30e-e73c08c0bacf-kube-api-access-6pmxg\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.676942 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-config\") pod \"dnsmasq-dns-6bc7876d45-f4tcc\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.677393 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e49b7206-629f-498a-b30e-e73c08c0bacf-ovs-rundir\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.678121 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e49b7206-629f-498a-b30e-e73c08c0bacf-config\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.678712 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-f4tcc\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.678767 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e49b7206-629f-498a-b30e-e73c08c0bacf-ovn-rundir\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.679492 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-f4tcc\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.682738 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-config\") pod \"dnsmasq-dns-6bc7876d45-f4tcc\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.686732 
4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e49b7206-629f-498a-b30e-e73c08c0bacf-combined-ca-bundle\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.689498 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e49b7206-629f-498a-b30e-e73c08c0bacf-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.706214 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.713109 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.714419 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.719615 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.720109 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.720213 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-wnc9g" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.720334 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.722909 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.746349 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pmxg\" (UniqueName: \"kubernetes.io/projected/e49b7206-629f-498a-b30e-e73c08c0bacf-kube-api-access-6pmxg\") pod \"ovn-controller-metrics-9qx7d\" (UID: \"e49b7206-629f-498a-b30e-e73c08c0bacf\") " pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.754986 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzbmf\" (UniqueName: \"kubernetes.io/projected/0286473d-0769-476d-962a-aa62b2470cad-kube-api-access-mzbmf\") pod \"dnsmasq-dns-6bc7876d45-f4tcc\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.756336 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-bhfwv"] Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.778634 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f5748bca-cf73-483d-a5ca-86e592adbc18-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.778758 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/f5748bca-cf73-483d-a5ca-86e592adbc18-scripts\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.778792 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s29jw\" (UniqueName: \"kubernetes.io/projected/f5748bca-cf73-483d-a5ca-86e592adbc18-kube-api-access-s29jw\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.778867 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5748bca-cf73-483d-a5ca-86e592adbc18-config\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.778938 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5748bca-cf73-483d-a5ca-86e592adbc18-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.778980 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5748bca-cf73-483d-a5ca-86e592adbc18-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.779002 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5748bca-cf73-483d-a5ca-86e592adbc18-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.849268 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-9fxzp"] Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.850450 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.856380 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.879046 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-9qx7d" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.879563 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-9fxzp"] Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.879947 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f5748bca-cf73-483d-a5ca-86e592adbc18-scripts\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.879982 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.880004 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s29jw\" (UniqueName: \"kubernetes.io/projected/f5748bca-cf73-483d-a5ca-86e592adbc18-kube-api-access-s29jw\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.880048 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-config\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.880081 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5748bca-cf73-483d-a5ca-86e592adbc18-config\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.880122 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5748bca-cf73-483d-a5ca-86e592adbc18-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.880136 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-dns-svc\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.880166 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.880193 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5748bca-cf73-483d-a5ca-86e592adbc18-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: 
\"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.880212 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5748bca-cf73-483d-a5ca-86e592adbc18-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.880233 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9r7f\" (UniqueName: \"kubernetes.io/projected/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-kube-api-access-p9r7f\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.880251 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f5748bca-cf73-483d-a5ca-86e592adbc18-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.881676 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f5748bca-cf73-483d-a5ca-86e592adbc18-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.882292 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f5748bca-cf73-483d-a5ca-86e592adbc18-scripts\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.884304 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5748bca-cf73-483d-a5ca-86e592adbc18-config\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.892603 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5748bca-cf73-483d-a5ca-86e592adbc18-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.903701 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5748bca-cf73-483d-a5ca-86e592adbc18-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.906722 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5748bca-cf73-483d-a5ca-86e592adbc18-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.907096 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.916852 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s29jw\" (UniqueName: \"kubernetes.io/projected/f5748bca-cf73-483d-a5ca-86e592adbc18-kube-api-access-s29jw\") pod \"ovn-northd-0\" (UID: \"f5748bca-cf73-483d-a5ca-86e592adbc18\") " pod="openstack/ovn-northd-0" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.982780 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-dns-svc\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.982827 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.982858 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9r7f\" (UniqueName: \"kubernetes.io/projected/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-kube-api-access-p9r7f\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.982903 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.982924 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-config\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.985013 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-config\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.986574 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-dns-svc\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.988158 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:15 crc kubenswrapper[4650]: I0201 07:40:15.996831 4650 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.027793 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9r7f\" (UniqueName: \"kubernetes.io/projected/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-kube-api-access-p9r7f\") pod \"dnsmasq-dns-8554648995-9fxzp\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.094248 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.128764 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.179739 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.186593 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnhp4\" (UniqueName: \"kubernetes.io/projected/81f3af79-80fc-4bd4-a429-82d9f42ef456-kube-api-access-qnhp4\") pod \"81f3af79-80fc-4bd4-a429-82d9f42ef456\" (UID: \"81f3af79-80fc-4bd4-a429-82d9f42ef456\") " Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.186706 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81f3af79-80fc-4bd4-a429-82d9f42ef456-config\") pod \"81f3af79-80fc-4bd4-a429-82d9f42ef456\" (UID: \"81f3af79-80fc-4bd4-a429-82d9f42ef456\") " Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.186835 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81f3af79-80fc-4bd4-a429-82d9f42ef456-dns-svc\") pod \"81f3af79-80fc-4bd4-a429-82d9f42ef456\" (UID: \"81f3af79-80fc-4bd4-a429-82d9f42ef456\") " Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.190846 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81f3af79-80fc-4bd4-a429-82d9f42ef456-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "81f3af79-80fc-4bd4-a429-82d9f42ef456" (UID: "81f3af79-80fc-4bd4-a429-82d9f42ef456"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.193184 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81f3af79-80fc-4bd4-a429-82d9f42ef456-config" (OuterVolumeSpecName: "config") pod "81f3af79-80fc-4bd4-a429-82d9f42ef456" (UID: "81f3af79-80fc-4bd4-a429-82d9f42ef456"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.212449 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81f3af79-80fc-4bd4-a429-82d9f42ef456-kube-api-access-qnhp4" (OuterVolumeSpecName: "kube-api-access-qnhp4") pod "81f3af79-80fc-4bd4-a429-82d9f42ef456" (UID: "81f3af79-80fc-4bd4-a429-82d9f42ef456"). InnerVolumeSpecName "kube-api-access-qnhp4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.297634 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnhp4\" (UniqueName: \"kubernetes.io/projected/81f3af79-80fc-4bd4-a429-82d9f42ef456-kube-api-access-qnhp4\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.298156 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81f3af79-80fc-4bd4-a429-82d9f42ef456-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.298168 4650 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81f3af79-80fc-4bd4-a429-82d9f42ef456-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.348193 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.501695 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rp9td\" (UniqueName: \"kubernetes.io/projected/b2bab93e-0469-4ee5-841a-bca36667a835-kube-api-access-rp9td\") pod \"b2bab93e-0469-4ee5-841a-bca36667a835\" (UID: \"b2bab93e-0469-4ee5-841a-bca36667a835\") " Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.501846 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2bab93e-0469-4ee5-841a-bca36667a835-config\") pod \"b2bab93e-0469-4ee5-841a-bca36667a835\" (UID: \"b2bab93e-0469-4ee5-841a-bca36667a835\") " Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.501964 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2bab93e-0469-4ee5-841a-bca36667a835-dns-svc\") pod \"b2bab93e-0469-4ee5-841a-bca36667a835\" (UID: \"b2bab93e-0469-4ee5-841a-bca36667a835\") " Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.502552 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2bab93e-0469-4ee5-841a-bca36667a835-config" (OuterVolumeSpecName: "config") pod "b2bab93e-0469-4ee5-841a-bca36667a835" (UID: "b2bab93e-0469-4ee5-841a-bca36667a835"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.502920 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2bab93e-0469-4ee5-841a-bca36667a835-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.503184 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2bab93e-0469-4ee5-841a-bca36667a835-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b2bab93e-0469-4ee5-841a-bca36667a835" (UID: "b2bab93e-0469-4ee5-841a-bca36667a835"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.506543 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2bab93e-0469-4ee5-841a-bca36667a835-kube-api-access-rp9td" (OuterVolumeSpecName: "kube-api-access-rp9td") pod "b2bab93e-0469-4ee5-841a-bca36667a835" (UID: "b2bab93e-0469-4ee5-841a-bca36667a835"). 
InnerVolumeSpecName "kube-api-access-rp9td". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.575116 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-f4tcc"] Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.604235 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rp9td\" (UniqueName: \"kubernetes.io/projected/b2bab93e-0469-4ee5-841a-bca36667a835-kube-api-access-rp9td\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.604261 4650 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2bab93e-0469-4ee5-841a-bca36667a835-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:16 crc kubenswrapper[4650]: W0201 07:40:16.604522 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode49b7206_629f_498a_b30e_e73c08c0bacf.slice/crio-099fb00a653dba77d8960d3d39504f16b6b2b6c38cade50625c089bd3b12b38b WatchSource:0}: Error finding container 099fb00a653dba77d8960d3d39504f16b6b2b6c38cade50625c089bd3b12b38b: Status 404 returned error can't find the container with id 099fb00a653dba77d8960d3d39504f16b6b2b6c38cade50625c089bd3b12b38b Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.605780 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-9qx7d"] Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.724915 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-9fxzp"] Feb 01 07:40:16 crc kubenswrapper[4650]: I0201 07:40:16.782450 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.100443 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" event={"ID":"b2bab93e-0469-4ee5-841a-bca36667a835","Type":"ContainerDied","Data":"18495a1a19babf48926fc04c73e2a3abe34e4f1eaedc574b9d7eba08c1b603a4"} Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.100520 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-bhfwv" Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.108318 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f5748bca-cf73-483d-a5ca-86e592adbc18","Type":"ContainerStarted","Data":"945d984ea18788ef61d9ed8d24919fe2cf28980cb385938b22be4e988506ff69"} Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.109049 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" event={"ID":"0286473d-0769-476d-962a-aa62b2470cad","Type":"ContainerStarted","Data":"52a8ca047daae1235fe15f32d578baf2a0be4f049b1d7ed183ee9d8cc5a341a5"} Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.112374 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" event={"ID":"81f3af79-80fc-4bd4-a429-82d9f42ef456","Type":"ContainerDied","Data":"ac021193874234dcc4e9d2de7f07f3cacad5bc43e25043f8256262f0a484a698"} Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.112408 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-cj2gg" Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.118035 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-9fxzp" event={"ID":"49d5b7da-4df2-4c94-8dcd-8e3fbf589474","Type":"ContainerStarted","Data":"9116ee116dc168c3d1964bd7a9927cc5cee946c13fe54ddfb63e8c8ba8c9119b"} Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.128049 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-9qx7d" event={"ID":"e49b7206-629f-498a-b30e-e73c08c0bacf","Type":"ContainerStarted","Data":"ae716c735841b47e09576833860850e7bc0ffe0afbb261e0381f02fd144db467"} Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.128087 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-9qx7d" event={"ID":"e49b7206-629f-498a-b30e-e73c08c0bacf","Type":"ContainerStarted","Data":"099fb00a653dba77d8960d3d39504f16b6b2b6c38cade50625c089bd3b12b38b"} Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.149617 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-9qx7d" podStartSLOduration=2.149594922 podStartE2EDuration="2.149594922s" podCreationTimestamp="2026-02-01 07:40:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:40:17.145128575 +0000 UTC m=+1015.868226820" watchObservedRunningTime="2026-02-01 07:40:17.149594922 +0000 UTC m=+1015.872693167" Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.201264 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-bhfwv"] Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.208729 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-bhfwv"] Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.263161 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-cj2gg"] Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.272971 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-cj2gg"] Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.982510 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81f3af79-80fc-4bd4-a429-82d9f42ef456" path="/var/lib/kubelet/pods/81f3af79-80fc-4bd4-a429-82d9f42ef456/volumes" Feb 01 07:40:17 crc kubenswrapper[4650]: I0201 07:40:17.982993 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2bab93e-0469-4ee5-841a-bca36667a835" path="/var/lib/kubelet/pods/b2bab93e-0469-4ee5-841a-bca36667a835/volumes" Feb 01 07:40:18 crc kubenswrapper[4650]: I0201 07:40:18.138064 4650 generic.go:334] "Generic (PLEG): container finished" podID="49d5b7da-4df2-4c94-8dcd-8e3fbf589474" containerID="d96ef25122ab98881ae95891f79172bae77ca7a2f6a696fa87bb769c849fd964" exitCode=0 Feb 01 07:40:18 crc kubenswrapper[4650]: I0201 07:40:18.138229 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-9fxzp" event={"ID":"49d5b7da-4df2-4c94-8dcd-8e3fbf589474","Type":"ContainerDied","Data":"d96ef25122ab98881ae95891f79172bae77ca7a2f6a696fa87bb769c849fd964"} Feb 01 07:40:18 crc kubenswrapper[4650]: I0201 07:40:18.144550 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" 
event={"ID":"f5748bca-cf73-483d-a5ca-86e592adbc18","Type":"ContainerStarted","Data":"3a7f15928dc47cde909dcf8190882a9f7967310e82360a58177da537b13a0288"} Feb 01 07:40:18 crc kubenswrapper[4650]: I0201 07:40:18.148548 4650 generic.go:334] "Generic (PLEG): container finished" podID="0286473d-0769-476d-962a-aa62b2470cad" containerID="b2c42005fc19b7083f4bdb8ccce373685b37441a1113dae3f62a8d1eb72c1faf" exitCode=0 Feb 01 07:40:18 crc kubenswrapper[4650]: I0201 07:40:18.148729 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" event={"ID":"0286473d-0769-476d-962a-aa62b2470cad","Type":"ContainerDied","Data":"b2c42005fc19b7083f4bdb8ccce373685b37441a1113dae3f62a8d1eb72c1faf"} Feb 01 07:40:18 crc kubenswrapper[4650]: I0201 07:40:18.941659 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Feb 01 07:40:18 crc kubenswrapper[4650]: I0201 07:40:18.941732 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Feb 01 07:40:19 crc kubenswrapper[4650]: I0201 07:40:19.059967 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Feb 01 07:40:19 crc kubenswrapper[4650]: I0201 07:40:19.159765 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-9fxzp" event={"ID":"49d5b7da-4df2-4c94-8dcd-8e3fbf589474","Type":"ContainerStarted","Data":"bdeb1978c7e8dc15353947af2694cc1bcd536af0da207db8a13e51566ffd771d"} Feb 01 07:40:19 crc kubenswrapper[4650]: I0201 07:40:19.160312 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:19 crc kubenswrapper[4650]: I0201 07:40:19.162070 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f5748bca-cf73-483d-a5ca-86e592adbc18","Type":"ContainerStarted","Data":"4895da5be8432484b34257fc3dcfd1cb747f7ecbc9a26473555d4affe521a194"} Feb 01 07:40:19 crc kubenswrapper[4650]: I0201 07:40:19.162787 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Feb 01 07:40:19 crc kubenswrapper[4650]: I0201 07:40:19.165893 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" event={"ID":"0286473d-0769-476d-962a-aa62b2470cad","Type":"ContainerStarted","Data":"63f763a04fae8eba25d9109952eee3afbea3236ade42667a38d4bb56c000ece9"} Feb 01 07:40:19 crc kubenswrapper[4650]: I0201 07:40:19.165925 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:19 crc kubenswrapper[4650]: I0201 07:40:19.190854 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-9fxzp" podStartSLOduration=3.757059836 podStartE2EDuration="4.190835352s" podCreationTimestamp="2026-02-01 07:40:15 +0000 UTC" firstStartedPulling="2026-02-01 07:40:16.736772836 +0000 UTC m=+1015.459871071" lastFinishedPulling="2026-02-01 07:40:17.170548342 +0000 UTC m=+1015.893646587" observedRunningTime="2026-02-01 07:40:19.185918723 +0000 UTC m=+1017.909016968" watchObservedRunningTime="2026-02-01 07:40:19.190835352 +0000 UTC m=+1017.913933597" Feb 01 07:40:19 crc kubenswrapper[4650]: I0201 07:40:19.234117 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.14480211 podStartE2EDuration="4.234092116s" 
podCreationTimestamp="2026-02-01 07:40:15 +0000 UTC" firstStartedPulling="2026-02-01 07:40:16.791972004 +0000 UTC m=+1015.515070249" lastFinishedPulling="2026-02-01 07:40:17.881262 +0000 UTC m=+1016.604360255" observedRunningTime="2026-02-01 07:40:19.230730668 +0000 UTC m=+1017.953828923" watchObservedRunningTime="2026-02-01 07:40:19.234092116 +0000 UTC m=+1017.957190381" Feb 01 07:40:19 crc kubenswrapper[4650]: I0201 07:40:19.235244 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" podStartSLOduration=3.829571278 podStartE2EDuration="4.235236756s" podCreationTimestamp="2026-02-01 07:40:15 +0000 UTC" firstStartedPulling="2026-02-01 07:40:16.58817955 +0000 UTC m=+1015.311277795" lastFinishedPulling="2026-02-01 07:40:16.993845028 +0000 UTC m=+1015.716943273" observedRunningTime="2026-02-01 07:40:19.205675521 +0000 UTC m=+1017.928773776" watchObservedRunningTime="2026-02-01 07:40:19.235236756 +0000 UTC m=+1017.958335001" Feb 01 07:40:19 crc kubenswrapper[4650]: I0201 07:40:19.283631 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.250941 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-f6d0-account-create-update-nhvh2"] Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.252285 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f6d0-account-create-update-nhvh2" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.255347 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.267301 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-px9bz"] Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.269851 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-px9bz" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.285633 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f6d0-account-create-update-nhvh2"] Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.324135 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-px9bz"] Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.414596 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dfbfc491-edcf-4ced-88d5-68f3373f5aa7-operator-scripts\") pod \"keystone-db-create-px9bz\" (UID: \"dfbfc491-edcf-4ced-88d5-68f3373f5aa7\") " pod="openstack/keystone-db-create-px9bz" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.414903 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6k9f\" (UniqueName: \"kubernetes.io/projected/dfbfc491-edcf-4ced-88d5-68f3373f5aa7-kube-api-access-q6k9f\") pod \"keystone-db-create-px9bz\" (UID: \"dfbfc491-edcf-4ced-88d5-68f3373f5aa7\") " pod="openstack/keystone-db-create-px9bz" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.415008 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcdh4\" (UniqueName: \"kubernetes.io/projected/dff691a9-0cd8-42ec-9f8d-1fbe9429566b-kube-api-access-xcdh4\") pod \"keystone-f6d0-account-create-update-nhvh2\" (UID: \"dff691a9-0cd8-42ec-9f8d-1fbe9429566b\") " pod="openstack/keystone-f6d0-account-create-update-nhvh2" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.415259 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff691a9-0cd8-42ec-9f8d-1fbe9429566b-operator-scripts\") pod \"keystone-f6d0-account-create-update-nhvh2\" (UID: \"dff691a9-0cd8-42ec-9f8d-1fbe9429566b\") " pod="openstack/keystone-f6d0-account-create-update-nhvh2" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.444833 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.445299 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.517075 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dfbfc491-edcf-4ced-88d5-68f3373f5aa7-operator-scripts\") pod \"keystone-db-create-px9bz\" (UID: \"dfbfc491-edcf-4ced-88d5-68f3373f5aa7\") " pod="openstack/keystone-db-create-px9bz" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.517124 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6k9f\" (UniqueName: \"kubernetes.io/projected/dfbfc491-edcf-4ced-88d5-68f3373f5aa7-kube-api-access-q6k9f\") pod \"keystone-db-create-px9bz\" (UID: \"dfbfc491-edcf-4ced-88d5-68f3373f5aa7\") " pod="openstack/keystone-db-create-px9bz" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.517160 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcdh4\" (UniqueName: \"kubernetes.io/projected/dff691a9-0cd8-42ec-9f8d-1fbe9429566b-kube-api-access-xcdh4\") pod 
\"keystone-f6d0-account-create-update-nhvh2\" (UID: \"dff691a9-0cd8-42ec-9f8d-1fbe9429566b\") " pod="openstack/keystone-f6d0-account-create-update-nhvh2" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.517247 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff691a9-0cd8-42ec-9f8d-1fbe9429566b-operator-scripts\") pod \"keystone-f6d0-account-create-update-nhvh2\" (UID: \"dff691a9-0cd8-42ec-9f8d-1fbe9429566b\") " pod="openstack/keystone-f6d0-account-create-update-nhvh2" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.517870 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff691a9-0cd8-42ec-9f8d-1fbe9429566b-operator-scripts\") pod \"keystone-f6d0-account-create-update-nhvh2\" (UID: \"dff691a9-0cd8-42ec-9f8d-1fbe9429566b\") " pod="openstack/keystone-f6d0-account-create-update-nhvh2" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.518827 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dfbfc491-edcf-4ced-88d5-68f3373f5aa7-operator-scripts\") pod \"keystone-db-create-px9bz\" (UID: \"dfbfc491-edcf-4ced-88d5-68f3373f5aa7\") " pod="openstack/keystone-db-create-px9bz" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.549780 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcdh4\" (UniqueName: \"kubernetes.io/projected/dff691a9-0cd8-42ec-9f8d-1fbe9429566b-kube-api-access-xcdh4\") pod \"keystone-f6d0-account-create-update-nhvh2\" (UID: \"dff691a9-0cd8-42ec-9f8d-1fbe9429566b\") " pod="openstack/keystone-f6d0-account-create-update-nhvh2" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.556844 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-nkfbd"] Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.558094 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-nkfbd" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.559812 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6k9f\" (UniqueName: \"kubernetes.io/projected/dfbfc491-edcf-4ced-88d5-68f3373f5aa7-kube-api-access-q6k9f\") pod \"keystone-db-create-px9bz\" (UID: \"dfbfc491-edcf-4ced-88d5-68f3373f5aa7\") " pod="openstack/keystone-db-create-px9bz" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.584282 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f6d0-account-create-update-nhvh2" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.597103 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-nkfbd"] Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.609557 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-1135-account-create-update-n5rhb"] Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.611707 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-1135-account-create-update-n5rhb" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.618971 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-px9bz" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.624242 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.635252 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-1135-account-create-update-n5rhb"] Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.637643 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.722883 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbtkc\" (UniqueName: \"kubernetes.io/projected/cc47f442-6ef1-4710-8033-9a9367b45a24-kube-api-access-nbtkc\") pod \"placement-db-create-nkfbd\" (UID: \"cc47f442-6ef1-4710-8033-9a9367b45a24\") " pod="openstack/placement-db-create-nkfbd" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.722933 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc47f442-6ef1-4710-8033-9a9367b45a24-operator-scripts\") pod \"placement-db-create-nkfbd\" (UID: \"cc47f442-6ef1-4710-8033-9a9367b45a24\") " pod="openstack/placement-db-create-nkfbd" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.722970 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49c29250-327f-47ad-b068-f42861c819ab-operator-scripts\") pod \"placement-1135-account-create-update-n5rhb\" (UID: \"49c29250-327f-47ad-b068-f42861c819ab\") " pod="openstack/placement-1135-account-create-update-n5rhb" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.722995 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2p24z\" (UniqueName: \"kubernetes.io/projected/49c29250-327f-47ad-b068-f42861c819ab-kube-api-access-2p24z\") pod \"placement-1135-account-create-update-n5rhb\" (UID: \"49c29250-327f-47ad-b068-f42861c819ab\") " pod="openstack/placement-1135-account-create-update-n5rhb" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.828613 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbtkc\" (UniqueName: \"kubernetes.io/projected/cc47f442-6ef1-4710-8033-9a9367b45a24-kube-api-access-nbtkc\") pod \"placement-db-create-nkfbd\" (UID: \"cc47f442-6ef1-4710-8033-9a9367b45a24\") " pod="openstack/placement-db-create-nkfbd" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.829035 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc47f442-6ef1-4710-8033-9a9367b45a24-operator-scripts\") pod \"placement-db-create-nkfbd\" (UID: \"cc47f442-6ef1-4710-8033-9a9367b45a24\") " pod="openstack/placement-db-create-nkfbd" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.829072 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49c29250-327f-47ad-b068-f42861c819ab-operator-scripts\") pod \"placement-1135-account-create-update-n5rhb\" (UID: \"49c29250-327f-47ad-b068-f42861c819ab\") " pod="openstack/placement-1135-account-create-update-n5rhb" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 
07:40:20.829115 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2p24z\" (UniqueName: \"kubernetes.io/projected/49c29250-327f-47ad-b068-f42861c819ab-kube-api-access-2p24z\") pod \"placement-1135-account-create-update-n5rhb\" (UID: \"49c29250-327f-47ad-b068-f42861c819ab\") " pod="openstack/placement-1135-account-create-update-n5rhb" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.830077 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49c29250-327f-47ad-b068-f42861c819ab-operator-scripts\") pod \"placement-1135-account-create-update-n5rhb\" (UID: \"49c29250-327f-47ad-b068-f42861c819ab\") " pod="openstack/placement-1135-account-create-update-n5rhb" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.830613 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc47f442-6ef1-4710-8033-9a9367b45a24-operator-scripts\") pod \"placement-db-create-nkfbd\" (UID: \"cc47f442-6ef1-4710-8033-9a9367b45a24\") " pod="openstack/placement-db-create-nkfbd" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.846570 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbtkc\" (UniqueName: \"kubernetes.io/projected/cc47f442-6ef1-4710-8033-9a9367b45a24-kube-api-access-nbtkc\") pod \"placement-db-create-nkfbd\" (UID: \"cc47f442-6ef1-4710-8033-9a9367b45a24\") " pod="openstack/placement-db-create-nkfbd" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.851631 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2p24z\" (UniqueName: \"kubernetes.io/projected/49c29250-327f-47ad-b068-f42861c819ab-kube-api-access-2p24z\") pod \"placement-1135-account-create-update-n5rhb\" (UID: \"49c29250-327f-47ad-b068-f42861c819ab\") " pod="openstack/placement-1135-account-create-update-n5rhb" Feb 01 07:40:20 crc kubenswrapper[4650]: I0201 07:40:20.935802 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-nkfbd" Feb 01 07:40:21 crc kubenswrapper[4650]: I0201 07:40:21.072433 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-1135-account-create-update-n5rhb" Feb 01 07:40:21 crc kubenswrapper[4650]: W0201 07:40:21.145995 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddff691a9_0cd8_42ec_9f8d_1fbe9429566b.slice/crio-7217270a8f31f7cc5a76a066042e842519ec21267db15d8ea3ac06bce208d84b WatchSource:0}: Error finding container 7217270a8f31f7cc5a76a066042e842519ec21267db15d8ea3ac06bce208d84b: Status 404 returned error can't find the container with id 7217270a8f31f7cc5a76a066042e842519ec21267db15d8ea3ac06bce208d84b Feb 01 07:40:21 crc kubenswrapper[4650]: I0201 07:40:21.147636 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f6d0-account-create-update-nhvh2"] Feb 01 07:40:21 crc kubenswrapper[4650]: I0201 07:40:21.186882 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f6d0-account-create-update-nhvh2" event={"ID":"dff691a9-0cd8-42ec-9f8d-1fbe9429566b","Type":"ContainerStarted","Data":"7217270a8f31f7cc5a76a066042e842519ec21267db15d8ea3ac06bce208d84b"} Feb 01 07:40:21 crc kubenswrapper[4650]: I0201 07:40:21.209457 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-px9bz"] Feb 01 07:40:21 crc kubenswrapper[4650]: W0201 07:40:21.217410 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddfbfc491_edcf_4ced_88d5_68f3373f5aa7.slice/crio-f487ada74c731a26a09603519f57eca3eb356fc4dc4c9f13bf3549a45efa3e4f WatchSource:0}: Error finding container f487ada74c731a26a09603519f57eca3eb356fc4dc4c9f13bf3549a45efa3e4f: Status 404 returned error can't find the container with id f487ada74c731a26a09603519f57eca3eb356fc4dc4c9f13bf3549a45efa3e4f Feb 01 07:40:21 crc kubenswrapper[4650]: I0201 07:40:21.309478 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Feb 01 07:40:21 crc kubenswrapper[4650]: I0201 07:40:21.400263 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-nkfbd"] Feb 01 07:40:21 crc kubenswrapper[4650]: I0201 07:40:21.557665 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-1135-account-create-update-n5rhb"] Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.196241 4650 generic.go:334] "Generic (PLEG): container finished" podID="dff691a9-0cd8-42ec-9f8d-1fbe9429566b" containerID="8d5e5dbe5759c31bf64982ba526d498bc73d1f9f0f7eeff69ca59b13ca7d8701" exitCode=0 Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.196337 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f6d0-account-create-update-nhvh2" event={"ID":"dff691a9-0cd8-42ec-9f8d-1fbe9429566b","Type":"ContainerDied","Data":"8d5e5dbe5759c31bf64982ba526d498bc73d1f9f0f7eeff69ca59b13ca7d8701"} Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.198628 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-1135-account-create-update-n5rhb" event={"ID":"49c29250-327f-47ad-b068-f42861c819ab","Type":"ContainerStarted","Data":"47c77622a6e374d174339b58a313b096f458fa48d3f755f722f33343b2680dbc"} Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.198672 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-1135-account-create-update-n5rhb" 
event={"ID":"49c29250-327f-47ad-b068-f42861c819ab","Type":"ContainerStarted","Data":"26ee7fd520e35eb6600fbc7026138ac226b6e996a5bc78b4665fd9d54da785c1"} Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.200373 4650 generic.go:334] "Generic (PLEG): container finished" podID="dfbfc491-edcf-4ced-88d5-68f3373f5aa7" containerID="9224ae6d3e41fb31799d33c0395616bd510a4ea25b4d166b85998ef53f01e8b4" exitCode=0 Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.200420 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-px9bz" event={"ID":"dfbfc491-edcf-4ced-88d5-68f3373f5aa7","Type":"ContainerDied","Data":"9224ae6d3e41fb31799d33c0395616bd510a4ea25b4d166b85998ef53f01e8b4"} Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.200436 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-px9bz" event={"ID":"dfbfc491-edcf-4ced-88d5-68f3373f5aa7","Type":"ContainerStarted","Data":"f487ada74c731a26a09603519f57eca3eb356fc4dc4c9f13bf3549a45efa3e4f"} Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.203719 4650 generic.go:334] "Generic (PLEG): container finished" podID="cc47f442-6ef1-4710-8033-9a9367b45a24" containerID="1cd89493603a6be879e8b8e1b224d9ecffd87c85fb286fb9aee3bfc571e25cfd" exitCode=0 Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.203784 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-nkfbd" event={"ID":"cc47f442-6ef1-4710-8033-9a9367b45a24","Type":"ContainerDied","Data":"1cd89493603a6be879e8b8e1b224d9ecffd87c85fb286fb9aee3bfc571e25cfd"} Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.203805 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-nkfbd" event={"ID":"cc47f442-6ef1-4710-8033-9a9367b45a24","Type":"ContainerStarted","Data":"2f947eb2d73ab3a3a080577de4ac4c3b0e36c2b4f7f8815f34c9a9dd3d846623"} Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.229726 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-1135-account-create-update-n5rhb" podStartSLOduration=2.229710564 podStartE2EDuration="2.229710564s" podCreationTimestamp="2026-02-01 07:40:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:40:22.227095995 +0000 UTC m=+1020.950194250" watchObservedRunningTime="2026-02-01 07:40:22.229710564 +0000 UTC m=+1020.952808809" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.557154 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-f4tcc"] Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.557348 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" podUID="0286473d-0769-476d-962a-aa62b2470cad" containerName="dnsmasq-dns" containerID="cri-o://63f763a04fae8eba25d9109952eee3afbea3236ade42667a38d4bb56c000ece9" gracePeriod=10 Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.622826 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-hjbmr"] Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.624069 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.663981 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-hjbmr"] Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.679720 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.679979 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.680127 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.680268 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64w2l\" (UniqueName: \"kubernetes.io/projected/73d8cddc-9598-4160-821f-9f2a594b9eb4-kube-api-access-64w2l\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.680302 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-config\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.782539 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64w2l\" (UniqueName: \"kubernetes.io/projected/73d8cddc-9598-4160-821f-9f2a594b9eb4-kube-api-access-64w2l\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.782625 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-config\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.782684 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.782706 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.782776 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.784164 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.784873 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.786732 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.787860 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-config\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.804013 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64w2l\" (UniqueName: \"kubernetes.io/projected/73d8cddc-9598-4160-821f-9f2a594b9eb4-kube-api-access-64w2l\") pod \"dnsmasq-dns-b8fbc5445-hjbmr\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:22 crc kubenswrapper[4650]: I0201 07:40:22.949622 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:23 crc kubenswrapper[4650]: I0201 07:40:23.214977 4650 generic.go:334] "Generic (PLEG): container finished" podID="0286473d-0769-476d-962a-aa62b2470cad" containerID="63f763a04fae8eba25d9109952eee3afbea3236ade42667a38d4bb56c000ece9" exitCode=0 Feb 01 07:40:23 crc kubenswrapper[4650]: I0201 07:40:23.215175 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" event={"ID":"0286473d-0769-476d-962a-aa62b2470cad","Type":"ContainerDied","Data":"63f763a04fae8eba25d9109952eee3afbea3236ade42667a38d4bb56c000ece9"} Feb 01 07:40:23 crc kubenswrapper[4650]: I0201 07:40:23.435138 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-hjbmr"] Feb 01 07:40:23 crc kubenswrapper[4650]: I0201 07:40:23.907172 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Feb 01 07:40:23 crc kubenswrapper[4650]: I0201 07:40:23.912534 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Feb 01 07:40:23 crc kubenswrapper[4650]: I0201 07:40:23.914325 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Feb 01 07:40:23 crc kubenswrapper[4650]: I0201 07:40:23.914336 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Feb 01 07:40:23 crc kubenswrapper[4650]: I0201 07:40:23.915582 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Feb 01 07:40:23 crc kubenswrapper[4650]: I0201 07:40:23.919697 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-9w2pk" Feb 01 07:40:23 crc kubenswrapper[4650]: I0201 07:40:23.942178 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Feb 01 07:40:23 crc kubenswrapper[4650]: I0201 07:40:23.999870 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.000076 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78a7b8d6-a107-4698-b85d-77d415755428-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.000126 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/78a7b8d6-a107-4698-b85d-77d415755428-cache\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.000432 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/78a7b8d6-a107-4698-b85d-77d415755428-lock\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.000515 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/78a7b8d6-a107-4698-b85d-77d415755428-etc-swift\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.000550 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98p26\" (UniqueName: \"kubernetes.io/projected/78a7b8d6-a107-4698-b85d-77d415755428-kube-api-access-98p26\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.102089 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/78a7b8d6-a107-4698-b85d-77d415755428-lock\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.102142 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/78a7b8d6-a107-4698-b85d-77d415755428-etc-swift\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.102157 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98p26\" (UniqueName: \"kubernetes.io/projected/78a7b8d6-a107-4698-b85d-77d415755428-kube-api-access-98p26\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.102193 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.102236 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78a7b8d6-a107-4698-b85d-77d415755428-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.102259 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/78a7b8d6-a107-4698-b85d-77d415755428-cache\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.102711 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/78a7b8d6-a107-4698-b85d-77d415755428-cache\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: E0201 07:40:24.102839 4650 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 01 07:40:24 crc kubenswrapper[4650]: E0201 07:40:24.102871 4650 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 01 07:40:24 crc kubenswrapper[4650]: E0201 07:40:24.102932 4650 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/78a7b8d6-a107-4698-b85d-77d415755428-etc-swift podName:78a7b8d6-a107-4698-b85d-77d415755428 nodeName:}" failed. No retries permitted until 2026-02-01 07:40:24.602913757 +0000 UTC m=+1023.326012002 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/78a7b8d6-a107-4698-b85d-77d415755428-etc-swift") pod "swift-storage-0" (UID: "78a7b8d6-a107-4698-b85d-77d415755428") : configmap "swift-ring-files" not found Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.102961 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.103006 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/78a7b8d6-a107-4698-b85d-77d415755428-lock\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.119327 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78a7b8d6-a107-4698-b85d-77d415755428-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.126190 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.129062 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98p26\" (UniqueName: \"kubernetes.io/projected/78a7b8d6-a107-4698-b85d-77d415755428-kube-api-access-98p26\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.222637 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" event={"ID":"73d8cddc-9598-4160-821f-9f2a594b9eb4","Type":"ContainerStarted","Data":"4160d2b1eb52f55e5b74178be80f00e55c971baf2b000ff5c2f52adddfd6df0c"} Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.424650 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f6d0-account-create-update-nhvh2" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.431053 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-px9bz" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.438538 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-nkfbd" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.469403 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-lr89m"] Feb 01 07:40:24 crc kubenswrapper[4650]: E0201 07:40:24.469708 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dff691a9-0cd8-42ec-9f8d-1fbe9429566b" containerName="mariadb-account-create-update" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.469724 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="dff691a9-0cd8-42ec-9f8d-1fbe9429566b" containerName="mariadb-account-create-update" Feb 01 07:40:24 crc kubenswrapper[4650]: E0201 07:40:24.469748 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc47f442-6ef1-4710-8033-9a9367b45a24" containerName="mariadb-database-create" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.469754 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc47f442-6ef1-4710-8033-9a9367b45a24" containerName="mariadb-database-create" Feb 01 07:40:24 crc kubenswrapper[4650]: E0201 07:40:24.469787 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfbfc491-edcf-4ced-88d5-68f3373f5aa7" containerName="mariadb-database-create" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.469793 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfbfc491-edcf-4ced-88d5-68f3373f5aa7" containerName="mariadb-database-create" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.469950 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc47f442-6ef1-4710-8033-9a9367b45a24" containerName="mariadb-database-create" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.469964 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="dff691a9-0cd8-42ec-9f8d-1fbe9429566b" containerName="mariadb-account-create-update" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.469972 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfbfc491-edcf-4ced-88d5-68f3373f5aa7" containerName="mariadb-database-create" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.470576 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.477185 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.477294 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.477497 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.497526 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-lr89m"] Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.509694 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcdh4\" (UniqueName: \"kubernetes.io/projected/dff691a9-0cd8-42ec-9f8d-1fbe9429566b-kube-api-access-xcdh4\") pod \"dff691a9-0cd8-42ec-9f8d-1fbe9429566b\" (UID: \"dff691a9-0cd8-42ec-9f8d-1fbe9429566b\") " Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.510576 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbtkc\" (UniqueName: \"kubernetes.io/projected/cc47f442-6ef1-4710-8033-9a9367b45a24-kube-api-access-nbtkc\") pod \"cc47f442-6ef1-4710-8033-9a9367b45a24\" (UID: \"cc47f442-6ef1-4710-8033-9a9367b45a24\") " Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.510621 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dfbfc491-edcf-4ced-88d5-68f3373f5aa7-operator-scripts\") pod \"dfbfc491-edcf-4ced-88d5-68f3373f5aa7\" (UID: \"dfbfc491-edcf-4ced-88d5-68f3373f5aa7\") " Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.510650 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc47f442-6ef1-4710-8033-9a9367b45a24-operator-scripts\") pod \"cc47f442-6ef1-4710-8033-9a9367b45a24\" (UID: \"cc47f442-6ef1-4710-8033-9a9367b45a24\") " Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.510716 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6k9f\" (UniqueName: \"kubernetes.io/projected/dfbfc491-edcf-4ced-88d5-68f3373f5aa7-kube-api-access-q6k9f\") pod \"dfbfc491-edcf-4ced-88d5-68f3373f5aa7\" (UID: \"dfbfc491-edcf-4ced-88d5-68f3373f5aa7\") " Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.510765 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff691a9-0cd8-42ec-9f8d-1fbe9429566b-operator-scripts\") pod \"dff691a9-0cd8-42ec-9f8d-1fbe9429566b\" (UID: \"dff691a9-0cd8-42ec-9f8d-1fbe9429566b\") " Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.511014 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/c5a1d51a-35a2-49a9-b337-679c75ddea99-etc-swift\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.511265 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfbfc491-edcf-4ced-88d5-68f3373f5aa7-operator-scripts" (OuterVolumeSpecName: 
"operator-scripts") pod "dfbfc491-edcf-4ced-88d5-68f3373f5aa7" (UID: "dfbfc491-edcf-4ced-88d5-68f3373f5aa7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.511683 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.511746 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h79nh\" (UniqueName: \"kubernetes.io/projected/c5a1d51a-35a2-49a9-b337-679c75ddea99-kube-api-access-h79nh\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.511771 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5a1d51a-35a2-49a9-b337-679c75ddea99-combined-ca-bundle\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.511793 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/c5a1d51a-35a2-49a9-b337-679c75ddea99-dispersionconf\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.511825 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/c5a1d51a-35a2-49a9-b337-679c75ddea99-swiftconf\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.511843 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-scripts\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.511888 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dfbfc491-edcf-4ced-88d5-68f3373f5aa7-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.512249 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc47f442-6ef1-4710-8033-9a9367b45a24-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cc47f442-6ef1-4710-8033-9a9367b45a24" (UID: "cc47f442-6ef1-4710-8033-9a9367b45a24"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.512450 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dff691a9-0cd8-42ec-9f8d-1fbe9429566b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dff691a9-0cd8-42ec-9f8d-1fbe9429566b" (UID: "dff691a9-0cd8-42ec-9f8d-1fbe9429566b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.515344 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dff691a9-0cd8-42ec-9f8d-1fbe9429566b-kube-api-access-xcdh4" (OuterVolumeSpecName: "kube-api-access-xcdh4") pod "dff691a9-0cd8-42ec-9f8d-1fbe9429566b" (UID: "dff691a9-0cd8-42ec-9f8d-1fbe9429566b"). InnerVolumeSpecName "kube-api-access-xcdh4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.517564 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc47f442-6ef1-4710-8033-9a9367b45a24-kube-api-access-nbtkc" (OuterVolumeSpecName: "kube-api-access-nbtkc") pod "cc47f442-6ef1-4710-8033-9a9367b45a24" (UID: "cc47f442-6ef1-4710-8033-9a9367b45a24"). InnerVolumeSpecName "kube-api-access-nbtkc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.525913 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfbfc491-edcf-4ced-88d5-68f3373f5aa7-kube-api-access-q6k9f" (OuterVolumeSpecName: "kube-api-access-q6k9f") pod "dfbfc491-edcf-4ced-88d5-68f3373f5aa7" (UID: "dfbfc491-edcf-4ced-88d5-68f3373f5aa7"). InnerVolumeSpecName "kube-api-access-q6k9f". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.605324 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.612813 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: E0201 07:40:24.612964 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:40:24 crc kubenswrapper[4650]: E0201 07:40:24.613070 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:40:25.113049665 +0000 UTC m=+1023.836147910 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.613172 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h79nh\" (UniqueName: \"kubernetes.io/projected/c5a1d51a-35a2-49a9-b337-679c75ddea99-kube-api-access-h79nh\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.613262 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5a1d51a-35a2-49a9-b337-679c75ddea99-combined-ca-bundle\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.613340 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/78a7b8d6-a107-4698-b85d-77d415755428-etc-swift\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.613421 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/c5a1d51a-35a2-49a9-b337-679c75ddea99-dispersionconf\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.613525 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/c5a1d51a-35a2-49a9-b337-679c75ddea99-swiftconf\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.613620 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-scripts\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.613712 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/c5a1d51a-35a2-49a9-b337-679c75ddea99-etc-swift\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.613886 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff691a9-0cd8-42ec-9f8d-1fbe9429566b-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.613979 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcdh4\" (UniqueName: \"kubernetes.io/projected/dff691a9-0cd8-42ec-9f8d-1fbe9429566b-kube-api-access-xcdh4\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.614060 4650 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbtkc\" (UniqueName: \"kubernetes.io/projected/cc47f442-6ef1-4710-8033-9a9367b45a24-kube-api-access-nbtkc\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.614129 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc47f442-6ef1-4710-8033-9a9367b45a24-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.614192 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6k9f\" (UniqueName: \"kubernetes.io/projected/dfbfc491-edcf-4ced-88d5-68f3373f5aa7-kube-api-access-q6k9f\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.614194 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/c5a1d51a-35a2-49a9-b337-679c75ddea99-etc-swift\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.614385 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-scripts\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.617147 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/c5a1d51a-35a2-49a9-b337-679c75ddea99-swiftconf\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.617352 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/c5a1d51a-35a2-49a9-b337-679c75ddea99-dispersionconf\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.617658 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5a1d51a-35a2-49a9-b337-679c75ddea99-combined-ca-bundle\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.618630 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/78a7b8d6-a107-4698-b85d-77d415755428-etc-swift\") pod \"swift-storage-0\" (UID: \"78a7b8d6-a107-4698-b85d-77d415755428\") " pod="openstack/swift-storage-0" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.639721 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h79nh\" (UniqueName: \"kubernetes.io/projected/c5a1d51a-35a2-49a9-b337-679c75ddea99-kube-api-access-h79nh\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:24 crc kubenswrapper[4650]: I0201 07:40:24.661184 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x2hwt"] Feb 01 07:40:24 crc 
kubenswrapper[4650]: I0201 07:40:24.829972 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.071389 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.129005 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzbmf\" (UniqueName: \"kubernetes.io/projected/0286473d-0769-476d-962a-aa62b2470cad-kube-api-access-mzbmf\") pod \"0286473d-0769-476d-962a-aa62b2470cad\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.129811 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-ovsdbserver-sb\") pod \"0286473d-0769-476d-962a-aa62b2470cad\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.129856 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-dns-svc\") pod \"0286473d-0769-476d-962a-aa62b2470cad\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.129886 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-config\") pod \"0286473d-0769-476d-962a-aa62b2470cad\" (UID: \"0286473d-0769-476d-962a-aa62b2470cad\") " Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.130131 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:25 crc kubenswrapper[4650]: E0201 07:40:25.130316 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:40:25 crc kubenswrapper[4650]: E0201 07:40:25.130364 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:40:26.130351291 +0000 UTC m=+1024.853449536 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.134949 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0286473d-0769-476d-962a-aa62b2470cad-kube-api-access-mzbmf" (OuterVolumeSpecName: "kube-api-access-mzbmf") pod "0286473d-0769-476d-962a-aa62b2470cad" (UID: "0286473d-0769-476d-962a-aa62b2470cad"). InnerVolumeSpecName "kube-api-access-mzbmf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.163661 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0286473d-0769-476d-962a-aa62b2470cad" (UID: "0286473d-0769-476d-962a-aa62b2470cad"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.165950 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-config" (OuterVolumeSpecName: "config") pod "0286473d-0769-476d-962a-aa62b2470cad" (UID: "0286473d-0769-476d-962a-aa62b2470cad"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.168997 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0286473d-0769-476d-962a-aa62b2470cad" (UID: "0286473d-0769-476d-962a-aa62b2470cad"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.231453 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzbmf\" (UniqueName: \"kubernetes.io/projected/0286473d-0769-476d-962a-aa62b2470cad-kube-api-access-mzbmf\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.231491 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.231500 4650 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.231509 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0286473d-0769-476d-962a-aa62b2470cad-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.232551 4650 generic.go:334] "Generic (PLEG): container finished" podID="49c29250-327f-47ad-b068-f42861c819ab" containerID="47c77622a6e374d174339b58a313b096f458fa48d3f755f722f33343b2680dbc" exitCode=0 Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.232605 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-1135-account-create-update-n5rhb" event={"ID":"49c29250-327f-47ad-b068-f42861c819ab","Type":"ContainerDied","Data":"47c77622a6e374d174339b58a313b096f458fa48d3f755f722f33343b2680dbc"} Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.239661 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-px9bz" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.241668 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-px9bz" event={"ID":"dfbfc491-edcf-4ced-88d5-68f3373f5aa7","Type":"ContainerDied","Data":"f487ada74c731a26a09603519f57eca3eb356fc4dc4c9f13bf3549a45efa3e4f"} Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.241713 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f487ada74c731a26a09603519f57eca3eb356fc4dc4c9f13bf3549a45efa3e4f" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.242529 4650 generic.go:334] "Generic (PLEG): container finished" podID="73d8cddc-9598-4160-821f-9f2a594b9eb4" containerID="1440c3a7436e9f6067cfa88648c15904656524c2196d8cd41c94596bab34bd60" exitCode=0 Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.243361 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" event={"ID":"73d8cddc-9598-4160-821f-9f2a594b9eb4","Type":"ContainerDied","Data":"1440c3a7436e9f6067cfa88648c15904656524c2196d8cd41c94596bab34bd60"} Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.245464 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-nkfbd" event={"ID":"cc47f442-6ef1-4710-8033-9a9367b45a24","Type":"ContainerDied","Data":"2f947eb2d73ab3a3a080577de4ac4c3b0e36c2b4f7f8815f34c9a9dd3d846623"} Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.245486 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f947eb2d73ab3a3a080577de4ac4c3b0e36c2b4f7f8815f34c9a9dd3d846623" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.245533 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-nkfbd" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.253588 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f6d0-account-create-update-nhvh2" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.253842 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f6d0-account-create-update-nhvh2" event={"ID":"dff691a9-0cd8-42ec-9f8d-1fbe9429566b","Type":"ContainerDied","Data":"7217270a8f31f7cc5a76a066042e842519ec21267db15d8ea3ac06bce208d84b"} Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.253877 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7217270a8f31f7cc5a76a066042e842519ec21267db15d8ea3ac06bce208d84b" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.258566 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-x2hwt" podUID="e6b5003e-5d03-4798-9822-873d2ea641b4" containerName="registry-server" containerID="cri-o://2184a6dbe35bbf45c2a0d265ea99628c062b31fe8949c728afc7c9496b3c8f2e" gracePeriod=2 Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.258697 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.258872 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-f4tcc" event={"ID":"0286473d-0769-476d-962a-aa62b2470cad","Type":"ContainerDied","Data":"52a8ca047daae1235fe15f32d578baf2a0be4f049b1d7ed183ee9d8cc5a341a5"} Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.259361 4650 scope.go:117] "RemoveContainer" containerID="63f763a04fae8eba25d9109952eee3afbea3236ade42667a38d4bb56c000ece9" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.287689 4650 scope.go:117] "RemoveContainer" containerID="b2c42005fc19b7083f4bdb8ccce373685b37441a1113dae3f62a8d1eb72c1faf" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.342474 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-f4tcc"] Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.347856 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-f4tcc"] Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.460586 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Feb 01 07:40:25 crc kubenswrapper[4650]: W0201 07:40:25.465111 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78a7b8d6_a107_4698_b85d_77d415755428.slice/crio-ef0e36ab21df6d6a7bd0b7a21c09f450be0c91dccc0e0d316c6413fabf88fa40 WatchSource:0}: Error finding container ef0e36ab21df6d6a7bd0b7a21c09f450be0c91dccc0e0d316c6413fabf88fa40: Status 404 returned error can't find the container with id ef0e36ab21df6d6a7bd0b7a21c09f450be0c91dccc0e0d316c6413fabf88fa40 Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.652977 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.747545 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b5003e-5d03-4798-9822-873d2ea641b4-catalog-content\") pod \"e6b5003e-5d03-4798-9822-873d2ea641b4\" (UID: \"e6b5003e-5d03-4798-9822-873d2ea641b4\") " Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.747930 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b5003e-5d03-4798-9822-873d2ea641b4-utilities\") pod \"e6b5003e-5d03-4798-9822-873d2ea641b4\" (UID: \"e6b5003e-5d03-4798-9822-873d2ea641b4\") " Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.748256 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndp72\" (UniqueName: \"kubernetes.io/projected/e6b5003e-5d03-4798-9822-873d2ea641b4-kube-api-access-ndp72\") pod \"e6b5003e-5d03-4798-9822-873d2ea641b4\" (UID: \"e6b5003e-5d03-4798-9822-873d2ea641b4\") " Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.748815 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6b5003e-5d03-4798-9822-873d2ea641b4-utilities" (OuterVolumeSpecName: "utilities") pod "e6b5003e-5d03-4798-9822-873d2ea641b4" (UID: "e6b5003e-5d03-4798-9822-873d2ea641b4"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.753067 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6b5003e-5d03-4798-9822-873d2ea641b4-kube-api-access-ndp72" (OuterVolumeSpecName: "kube-api-access-ndp72") pod "e6b5003e-5d03-4798-9822-873d2ea641b4" (UID: "e6b5003e-5d03-4798-9822-873d2ea641b4"). InnerVolumeSpecName "kube-api-access-ndp72". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.774775 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6b5003e-5d03-4798-9822-873d2ea641b4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e6b5003e-5d03-4798-9822-873d2ea641b4" (UID: "e6b5003e-5d03-4798-9822-873d2ea641b4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.850188 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndp72\" (UniqueName: \"kubernetes.io/projected/e6b5003e-5d03-4798-9822-873d2ea641b4-kube-api-access-ndp72\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.850230 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b5003e-5d03-4798-9822-873d2ea641b4-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.850284 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b5003e-5d03-4798-9822-873d2ea641b4-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:25 crc kubenswrapper[4650]: I0201 07:40:25.979179 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0286473d-0769-476d-962a-aa62b2470cad" path="/var/lib/kubelet/pods/0286473d-0769-476d-962a-aa62b2470cad/volumes" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.037204 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-cl46t"] Feb 01 07:40:26 crc kubenswrapper[4650]: E0201 07:40:26.037837 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0286473d-0769-476d-962a-aa62b2470cad" containerName="dnsmasq-dns" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.037894 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="0286473d-0769-476d-962a-aa62b2470cad" containerName="dnsmasq-dns" Feb 01 07:40:26 crc kubenswrapper[4650]: E0201 07:40:26.037959 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6b5003e-5d03-4798-9822-873d2ea641b4" containerName="extract-utilities" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.037977 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6b5003e-5d03-4798-9822-873d2ea641b4" containerName="extract-utilities" Feb 01 07:40:26 crc kubenswrapper[4650]: E0201 07:40:26.038081 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0286473d-0769-476d-962a-aa62b2470cad" containerName="init" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.038100 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="0286473d-0769-476d-962a-aa62b2470cad" containerName="init" Feb 01 07:40:26 crc kubenswrapper[4650]: E0201 07:40:26.038138 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6b5003e-5d03-4798-9822-873d2ea641b4" containerName="extract-content" Feb 
01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.038156 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6b5003e-5d03-4798-9822-873d2ea641b4" containerName="extract-content" Feb 01 07:40:26 crc kubenswrapper[4650]: E0201 07:40:26.038188 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6b5003e-5d03-4798-9822-873d2ea641b4" containerName="registry-server" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.038204 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6b5003e-5d03-4798-9822-873d2ea641b4" containerName="registry-server" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.038609 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6b5003e-5d03-4798-9822-873d2ea641b4" containerName="registry-server" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.038636 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="0286473d-0769-476d-962a-aa62b2470cad" containerName="dnsmasq-dns" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.039917 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-cl46t" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.051138 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-cl46t"] Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.152387 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-24db-account-create-update-nngtm"] Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.153318 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.153414 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a8a7b3a-cfd2-43de-9026-fb9511531544-operator-scripts\") pod \"glance-db-create-cl46t\" (UID: \"6a8a7b3a-cfd2-43de-9026-fb9511531544\") " pod="openstack/glance-db-create-cl46t" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.153546 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxwqg\" (UniqueName: \"kubernetes.io/projected/6a8a7b3a-cfd2-43de-9026-fb9511531544-kube-api-access-gxwqg\") pod \"glance-db-create-cl46t\" (UID: \"6a8a7b3a-cfd2-43de-9026-fb9511531544\") " pod="openstack/glance-db-create-cl46t" Feb 01 07:40:26 crc kubenswrapper[4650]: E0201 07:40:26.153625 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:40:26 crc kubenswrapper[4650]: E0201 07:40:26.153701 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:40:28.153678797 +0000 UTC m=+1026.876777122 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.153745 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-24db-account-create-update-nngtm" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.157795 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.162210 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-24db-account-create-update-nngtm"] Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.181203 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.255630 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2ndr\" (UniqueName: \"kubernetes.io/projected/c6b48882-4a16-4c93-8a4b-3118bea76c46-kube-api-access-r2ndr\") pod \"glance-24db-account-create-update-nngtm\" (UID: \"c6b48882-4a16-4c93-8a4b-3118bea76c46\") " pod="openstack/glance-24db-account-create-update-nngtm" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.255731 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxwqg\" (UniqueName: \"kubernetes.io/projected/6a8a7b3a-cfd2-43de-9026-fb9511531544-kube-api-access-gxwqg\") pod \"glance-db-create-cl46t\" (UID: \"6a8a7b3a-cfd2-43de-9026-fb9511531544\") " pod="openstack/glance-db-create-cl46t" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.255859 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a8a7b3a-cfd2-43de-9026-fb9511531544-operator-scripts\") pod \"glance-db-create-cl46t\" (UID: \"6a8a7b3a-cfd2-43de-9026-fb9511531544\") " pod="openstack/glance-db-create-cl46t" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.255903 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6b48882-4a16-4c93-8a4b-3118bea76c46-operator-scripts\") pod \"glance-24db-account-create-update-nngtm\" (UID: \"c6b48882-4a16-4c93-8a4b-3118bea76c46\") " pod="openstack/glance-24db-account-create-update-nngtm" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.256750 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a8a7b3a-cfd2-43de-9026-fb9511531544-operator-scripts\") pod \"glance-db-create-cl46t\" (UID: \"6a8a7b3a-cfd2-43de-9026-fb9511531544\") " pod="openstack/glance-db-create-cl46t" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.295121 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" event={"ID":"73d8cddc-9598-4160-821f-9f2a594b9eb4","Type":"ContainerStarted","Data":"7328dc1c02bb9990f7c8772431212dc76e5892644429a10b8063d3eee2081556"} Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.295757 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:26 crc kubenswrapper[4650]: 
I0201 07:40:26.304332 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxwqg\" (UniqueName: \"kubernetes.io/projected/6a8a7b3a-cfd2-43de-9026-fb9511531544-kube-api-access-gxwqg\") pod \"glance-db-create-cl46t\" (UID: \"6a8a7b3a-cfd2-43de-9026-fb9511531544\") " pod="openstack/glance-db-create-cl46t" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.314682 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"ef0e36ab21df6d6a7bd0b7a21c09f450be0c91dccc0e0d316c6413fabf88fa40"} Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.327812 4650 generic.go:334] "Generic (PLEG): container finished" podID="e6b5003e-5d03-4798-9822-873d2ea641b4" containerID="2184a6dbe35bbf45c2a0d265ea99628c062b31fe8949c728afc7c9496b3c8f2e" exitCode=0 Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.328019 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x2hwt" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.328110 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x2hwt" event={"ID":"e6b5003e-5d03-4798-9822-873d2ea641b4","Type":"ContainerDied","Data":"2184a6dbe35bbf45c2a0d265ea99628c062b31fe8949c728afc7c9496b3c8f2e"} Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.328161 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x2hwt" event={"ID":"e6b5003e-5d03-4798-9822-873d2ea641b4","Type":"ContainerDied","Data":"d8f7365a32a2eeef93a1c7a07baefd4e141c01e18efe90adbf8928e5e6ffc0cb"} Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.328178 4650 scope.go:117] "RemoveContainer" containerID="2184a6dbe35bbf45c2a0d265ea99628c062b31fe8949c728afc7c9496b3c8f2e" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.353452 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" podStartSLOduration=4.3534331250000005 podStartE2EDuration="4.353433125s" podCreationTimestamp="2026-02-01 07:40:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:40:26.346216216 +0000 UTC m=+1025.069314471" watchObservedRunningTime="2026-02-01 07:40:26.353433125 +0000 UTC m=+1025.076531370" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.360842 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2ndr\" (UniqueName: \"kubernetes.io/projected/c6b48882-4a16-4c93-8a4b-3118bea76c46-kube-api-access-r2ndr\") pod \"glance-24db-account-create-update-nngtm\" (UID: \"c6b48882-4a16-4c93-8a4b-3118bea76c46\") " pod="openstack/glance-24db-account-create-update-nngtm" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.360981 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6b48882-4a16-4c93-8a4b-3118bea76c46-operator-scripts\") pod \"glance-24db-account-create-update-nngtm\" (UID: \"c6b48882-4a16-4c93-8a4b-3118bea76c46\") " pod="openstack/glance-24db-account-create-update-nngtm" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.361803 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/c6b48882-4a16-4c93-8a4b-3118bea76c46-operator-scripts\") pod \"glance-24db-account-create-update-nngtm\" (UID: \"c6b48882-4a16-4c93-8a4b-3118bea76c46\") " pod="openstack/glance-24db-account-create-update-nngtm" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.374168 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x2hwt"] Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.376206 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-cl46t" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.377107 4650 scope.go:117] "RemoveContainer" containerID="c7d84b1c81c4ee1a49646d00c8517a859702736664531698979d6c033d9207d3" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.399715 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-x2hwt"] Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.403849 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2ndr\" (UniqueName: \"kubernetes.io/projected/c6b48882-4a16-4c93-8a4b-3118bea76c46-kube-api-access-r2ndr\") pod \"glance-24db-account-create-update-nngtm\" (UID: \"c6b48882-4a16-4c93-8a4b-3118bea76c46\") " pod="openstack/glance-24db-account-create-update-nngtm" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.480437 4650 scope.go:117] "RemoveContainer" containerID="546f4b93baf8ffaf2066ac43b0f3544f975ad82f243963aeb7c2786d3d51c788" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.488375 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-24db-account-create-update-nngtm" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.557327 4650 scope.go:117] "RemoveContainer" containerID="2184a6dbe35bbf45c2a0d265ea99628c062b31fe8949c728afc7c9496b3c8f2e" Feb 01 07:40:26 crc kubenswrapper[4650]: E0201 07:40:26.566511 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2184a6dbe35bbf45c2a0d265ea99628c062b31fe8949c728afc7c9496b3c8f2e\": container with ID starting with 2184a6dbe35bbf45c2a0d265ea99628c062b31fe8949c728afc7c9496b3c8f2e not found: ID does not exist" containerID="2184a6dbe35bbf45c2a0d265ea99628c062b31fe8949c728afc7c9496b3c8f2e" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.566559 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2184a6dbe35bbf45c2a0d265ea99628c062b31fe8949c728afc7c9496b3c8f2e"} err="failed to get container status \"2184a6dbe35bbf45c2a0d265ea99628c062b31fe8949c728afc7c9496b3c8f2e\": rpc error: code = NotFound desc = could not find container \"2184a6dbe35bbf45c2a0d265ea99628c062b31fe8949c728afc7c9496b3c8f2e\": container with ID starting with 2184a6dbe35bbf45c2a0d265ea99628c062b31fe8949c728afc7c9496b3c8f2e not found: ID does not exist" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.566588 4650 scope.go:117] "RemoveContainer" containerID="c7d84b1c81c4ee1a49646d00c8517a859702736664531698979d6c033d9207d3" Feb 01 07:40:26 crc kubenswrapper[4650]: E0201 07:40:26.566864 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7d84b1c81c4ee1a49646d00c8517a859702736664531698979d6c033d9207d3\": container with ID starting with c7d84b1c81c4ee1a49646d00c8517a859702736664531698979d6c033d9207d3 not found: ID does not exist" 
containerID="c7d84b1c81c4ee1a49646d00c8517a859702736664531698979d6c033d9207d3" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.566885 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7d84b1c81c4ee1a49646d00c8517a859702736664531698979d6c033d9207d3"} err="failed to get container status \"c7d84b1c81c4ee1a49646d00c8517a859702736664531698979d6c033d9207d3\": rpc error: code = NotFound desc = could not find container \"c7d84b1c81c4ee1a49646d00c8517a859702736664531698979d6c033d9207d3\": container with ID starting with c7d84b1c81c4ee1a49646d00c8517a859702736664531698979d6c033d9207d3 not found: ID does not exist" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.566899 4650 scope.go:117] "RemoveContainer" containerID="546f4b93baf8ffaf2066ac43b0f3544f975ad82f243963aeb7c2786d3d51c788" Feb 01 07:40:26 crc kubenswrapper[4650]: E0201 07:40:26.567513 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"546f4b93baf8ffaf2066ac43b0f3544f975ad82f243963aeb7c2786d3d51c788\": container with ID starting with 546f4b93baf8ffaf2066ac43b0f3544f975ad82f243963aeb7c2786d3d51c788 not found: ID does not exist" containerID="546f4b93baf8ffaf2066ac43b0f3544f975ad82f243963aeb7c2786d3d51c788" Feb 01 07:40:26 crc kubenswrapper[4650]: I0201 07:40:26.567554 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"546f4b93baf8ffaf2066ac43b0f3544f975ad82f243963aeb7c2786d3d51c788"} err="failed to get container status \"546f4b93baf8ffaf2066ac43b0f3544f975ad82f243963aeb7c2786d3d51c788\": rpc error: code = NotFound desc = could not find container \"546f4b93baf8ffaf2066ac43b0f3544f975ad82f243963aeb7c2786d3d51c788\": container with ID starting with 546f4b93baf8ffaf2066ac43b0f3544f975ad82f243963aeb7c2786d3d51c788 not found: ID does not exist" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.081356 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-cl46t"] Feb 01 07:40:27 crc kubenswrapper[4650]: W0201 07:40:27.443872 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a8a7b3a_cfd2_43de_9026_fb9511531544.slice/crio-cf6ae5c695153c31ae07d68b1da53eaf8963eba389ef563fd1ab313c93d12df6 WatchSource:0}: Error finding container cf6ae5c695153c31ae07d68b1da53eaf8963eba389ef563fd1ab313c93d12df6: Status 404 returned error can't find the container with id cf6ae5c695153c31ae07d68b1da53eaf8963eba389ef563fd1ab313c93d12df6 Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.512442 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-1135-account-create-update-n5rhb" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.520292 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-24db-account-create-update-nngtm"] Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.656636 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-ls4nm"] Feb 01 07:40:27 crc kubenswrapper[4650]: E0201 07:40:27.656977 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c29250-327f-47ad-b068-f42861c819ab" containerName="mariadb-account-create-update" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.656994 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c29250-327f-47ad-b068-f42861c819ab" containerName="mariadb-account-create-update" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.657153 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="49c29250-327f-47ad-b068-f42861c819ab" containerName="mariadb-account-create-update" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.657647 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-ls4nm" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.662684 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.670147 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-ls4nm"] Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.703283 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2p24z\" (UniqueName: \"kubernetes.io/projected/49c29250-327f-47ad-b068-f42861c819ab-kube-api-access-2p24z\") pod \"49c29250-327f-47ad-b068-f42861c819ab\" (UID: \"49c29250-327f-47ad-b068-f42861c819ab\") " Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.705049 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49c29250-327f-47ad-b068-f42861c819ab-operator-scripts\") pod \"49c29250-327f-47ad-b068-f42861c819ab\" (UID: \"49c29250-327f-47ad-b068-f42861c819ab\") " Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.705784 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c29250-327f-47ad-b068-f42861c819ab-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "49c29250-327f-47ad-b068-f42861c819ab" (UID: "49c29250-327f-47ad-b068-f42861c819ab"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.723518 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c29250-327f-47ad-b068-f42861c819ab-kube-api-access-2p24z" (OuterVolumeSpecName: "kube-api-access-2p24z") pod "49c29250-327f-47ad-b068-f42861c819ab" (UID: "49c29250-327f-47ad-b068-f42861c819ab"). InnerVolumeSpecName "kube-api-access-2p24z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.807297 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94f99333-91c5-405b-bcff-de7c84179d99-operator-scripts\") pod \"root-account-create-update-ls4nm\" (UID: \"94f99333-91c5-405b-bcff-de7c84179d99\") " pod="openstack/root-account-create-update-ls4nm" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.807335 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfqfm\" (UniqueName: \"kubernetes.io/projected/94f99333-91c5-405b-bcff-de7c84179d99-kube-api-access-nfqfm\") pod \"root-account-create-update-ls4nm\" (UID: \"94f99333-91c5-405b-bcff-de7c84179d99\") " pod="openstack/root-account-create-update-ls4nm" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.807453 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49c29250-327f-47ad-b068-f42861c819ab-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.807465 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2p24z\" (UniqueName: \"kubernetes.io/projected/49c29250-327f-47ad-b068-f42861c819ab-kube-api-access-2p24z\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.908365 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94f99333-91c5-405b-bcff-de7c84179d99-operator-scripts\") pod \"root-account-create-update-ls4nm\" (UID: \"94f99333-91c5-405b-bcff-de7c84179d99\") " pod="openstack/root-account-create-update-ls4nm" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.908400 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfqfm\" (UniqueName: \"kubernetes.io/projected/94f99333-91c5-405b-bcff-de7c84179d99-kube-api-access-nfqfm\") pod \"root-account-create-update-ls4nm\" (UID: \"94f99333-91c5-405b-bcff-de7c84179d99\") " pod="openstack/root-account-create-update-ls4nm" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.910198 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94f99333-91c5-405b-bcff-de7c84179d99-operator-scripts\") pod \"root-account-create-update-ls4nm\" (UID: \"94f99333-91c5-405b-bcff-de7c84179d99\") " pod="openstack/root-account-create-update-ls4nm" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.928931 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfqfm\" (UniqueName: \"kubernetes.io/projected/94f99333-91c5-405b-bcff-de7c84179d99-kube-api-access-nfqfm\") pod \"root-account-create-update-ls4nm\" (UID: \"94f99333-91c5-405b-bcff-de7c84179d99\") " pod="openstack/root-account-create-update-ls4nm" Feb 01 07:40:27 crc kubenswrapper[4650]: I0201 07:40:27.979081 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6b5003e-5d03-4798-9822-873d2ea641b4" path="/var/lib/kubelet/pods/e6b5003e-5d03-4798-9822-873d2ea641b4/volumes" Feb 01 07:40:28 crc kubenswrapper[4650]: I0201 07:40:28.020771 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-ls4nm" Feb 01 07:40:28 crc kubenswrapper[4650]: I0201 07:40:28.213561 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:28 crc kubenswrapper[4650]: E0201 07:40:28.213978 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:40:28 crc kubenswrapper[4650]: E0201 07:40:28.214049 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:40:32.214016988 +0000 UTC m=+1030.937115233 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:40:28 crc kubenswrapper[4650]: I0201 07:40:28.435303 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-24db-account-create-update-nngtm" event={"ID":"c6b48882-4a16-4c93-8a4b-3118bea76c46","Type":"ContainerStarted","Data":"3fad983c4a033e8b8736e73b54bb41ceb67e7be652b7f4cbd2a80390a30b8967"} Feb 01 07:40:28 crc kubenswrapper[4650]: I0201 07:40:28.436228 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-cl46t" event={"ID":"6a8a7b3a-cfd2-43de-9026-fb9511531544","Type":"ContainerStarted","Data":"cf6ae5c695153c31ae07d68b1da53eaf8963eba389ef563fd1ab313c93d12df6"} Feb 01 07:40:28 crc kubenswrapper[4650]: I0201 07:40:28.437584 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-1135-account-create-update-n5rhb" event={"ID":"49c29250-327f-47ad-b068-f42861c819ab","Type":"ContainerDied","Data":"26ee7fd520e35eb6600fbc7026138ac226b6e996a5bc78b4665fd9d54da785c1"} Feb 01 07:40:28 crc kubenswrapper[4650]: I0201 07:40:28.437696 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="26ee7fd520e35eb6600fbc7026138ac226b6e996a5bc78b4665fd9d54da785c1" Feb 01 07:40:28 crc kubenswrapper[4650]: I0201 07:40:28.437863 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-1135-account-create-update-n5rhb" Feb 01 07:40:29 crc kubenswrapper[4650]: I0201 07:40:29.054229 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-ls4nm"] Feb 01 07:40:29 crc kubenswrapper[4650]: W0201 07:40:29.060242 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod94f99333_91c5_405b_bcff_de7c84179d99.slice/crio-2fb65845a02a4a4926153fdd419c6adf99c8173743809e3d0b9e61f74ab7e8a9 WatchSource:0}: Error finding container 2fb65845a02a4a4926153fdd419c6adf99c8173743809e3d0b9e61f74ab7e8a9: Status 404 returned error can't find the container with id 2fb65845a02a4a4926153fdd419c6adf99c8173743809e3d0b9e61f74ab7e8a9 Feb 01 07:40:29 crc kubenswrapper[4650]: I0201 07:40:29.447359 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-ls4nm" event={"ID":"94f99333-91c5-405b-bcff-de7c84179d99","Type":"ContainerStarted","Data":"4d37cf6bbb346b6469aff2307f4012b5e570dcb9bd8d3484f6044bba2bc5248e"} Feb 01 07:40:29 crc kubenswrapper[4650]: I0201 07:40:29.447403 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-ls4nm" event={"ID":"94f99333-91c5-405b-bcff-de7c84179d99","Type":"ContainerStarted","Data":"2fb65845a02a4a4926153fdd419c6adf99c8173743809e3d0b9e61f74ab7e8a9"} Feb 01 07:40:29 crc kubenswrapper[4650]: I0201 07:40:29.453241 4650 generic.go:334] "Generic (PLEG): container finished" podID="c6b48882-4a16-4c93-8a4b-3118bea76c46" containerID="afb6f7e7a6361166a79d5c9c9c11c3e7a85925839b9e3a25d3c34479c55be767" exitCode=0 Feb 01 07:40:29 crc kubenswrapper[4650]: I0201 07:40:29.453293 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-24db-account-create-update-nngtm" event={"ID":"c6b48882-4a16-4c93-8a4b-3118bea76c46","Type":"ContainerDied","Data":"afb6f7e7a6361166a79d5c9c9c11c3e7a85925839b9e3a25d3c34479c55be767"} Feb 01 07:40:29 crc kubenswrapper[4650]: I0201 07:40:29.459449 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"919ec043609614c9b647ba97f8573e967d3b610ab7cdcd7d9baad626ea4498af"} Feb 01 07:40:29 crc kubenswrapper[4650]: I0201 07:40:29.459480 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"326d54b1ea0b5fe1374524e6810598b15201018a808a7fa14bd0c19e4ccbe0a1"} Feb 01 07:40:29 crc kubenswrapper[4650]: I0201 07:40:29.459490 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"e7e68451b8cf7d29dae2c3584f0e748b91b72e7e9b5f0fb5f0376bbc02b8c6c8"} Feb 01 07:40:29 crc kubenswrapper[4650]: I0201 07:40:29.459502 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"cd0f18f3f769a44a30687c2dcea2ae03494f9516b676719c67741c1718185684"} Feb 01 07:40:29 crc kubenswrapper[4650]: I0201 07:40:29.461364 4650 generic.go:334] "Generic (PLEG): container finished" podID="6a8a7b3a-cfd2-43de-9026-fb9511531544" containerID="661db6a99c171dffcf7b9e23a488449dd64a4095b44ea3dd32fb4f78c5362a0a" exitCode=0 Feb 01 07:40:29 crc kubenswrapper[4650]: I0201 07:40:29.461394 4650 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-cl46t" event={"ID":"6a8a7b3a-cfd2-43de-9026-fb9511531544","Type":"ContainerDied","Data":"661db6a99c171dffcf7b9e23a488449dd64a4095b44ea3dd32fb4f78c5362a0a"} Feb 01 07:40:29 crc kubenswrapper[4650]: I0201 07:40:29.473921 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-ls4nm" podStartSLOduration=2.4739038669999998 podStartE2EDuration="2.473903867s" podCreationTimestamp="2026-02-01 07:40:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:40:29.465559768 +0000 UTC m=+1028.188658013" watchObservedRunningTime="2026-02-01 07:40:29.473903867 +0000 UTC m=+1028.197002112" Feb 01 07:40:30 crc kubenswrapper[4650]: I0201 07:40:30.474979 4650 generic.go:334] "Generic (PLEG): container finished" podID="94f99333-91c5-405b-bcff-de7c84179d99" containerID="4d37cf6bbb346b6469aff2307f4012b5e570dcb9bd8d3484f6044bba2bc5248e" exitCode=0 Feb 01 07:40:30 crc kubenswrapper[4650]: I0201 07:40:30.475274 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-ls4nm" event={"ID":"94f99333-91c5-405b-bcff-de7c84179d99","Type":"ContainerDied","Data":"4d37cf6bbb346b6469aff2307f4012b5e570dcb9bd8d3484f6044bba2bc5248e"} Feb 01 07:40:30 crc kubenswrapper[4650]: I0201 07:40:30.479270 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="e7e68451b8cf7d29dae2c3584f0e748b91b72e7e9b5f0fb5f0376bbc02b8c6c8" exitCode=1 Feb 01 07:40:30 crc kubenswrapper[4650]: I0201 07:40:30.479311 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"e7e68451b8cf7d29dae2c3584f0e748b91b72e7e9b5f0fb5f0376bbc02b8c6c8"} Feb 01 07:40:30 crc kubenswrapper[4650]: I0201 07:40:30.910987 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-24db-account-create-update-nngtm" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.008621 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-cl46t" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.066794 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2ndr\" (UniqueName: \"kubernetes.io/projected/c6b48882-4a16-4c93-8a4b-3118bea76c46-kube-api-access-r2ndr\") pod \"c6b48882-4a16-4c93-8a4b-3118bea76c46\" (UID: \"c6b48882-4a16-4c93-8a4b-3118bea76c46\") " Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.066896 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6b48882-4a16-4c93-8a4b-3118bea76c46-operator-scripts\") pod \"c6b48882-4a16-4c93-8a4b-3118bea76c46\" (UID: \"c6b48882-4a16-4c93-8a4b-3118bea76c46\") " Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.068331 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6b48882-4a16-4c93-8a4b-3118bea76c46-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c6b48882-4a16-4c93-8a4b-3118bea76c46" (UID: "c6b48882-4a16-4c93-8a4b-3118bea76c46"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.073237 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6b48882-4a16-4c93-8a4b-3118bea76c46-kube-api-access-r2ndr" (OuterVolumeSpecName: "kube-api-access-r2ndr") pod "c6b48882-4a16-4c93-8a4b-3118bea76c46" (UID: "c6b48882-4a16-4c93-8a4b-3118bea76c46"). InnerVolumeSpecName "kube-api-access-r2ndr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.169837 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a8a7b3a-cfd2-43de-9026-fb9511531544-operator-scripts\") pod \"6a8a7b3a-cfd2-43de-9026-fb9511531544\" (UID: \"6a8a7b3a-cfd2-43de-9026-fb9511531544\") " Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.170244 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxwqg\" (UniqueName: \"kubernetes.io/projected/6a8a7b3a-cfd2-43de-9026-fb9511531544-kube-api-access-gxwqg\") pod \"6a8a7b3a-cfd2-43de-9026-fb9511531544\" (UID: \"6a8a7b3a-cfd2-43de-9026-fb9511531544\") " Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.170417 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a8a7b3a-cfd2-43de-9026-fb9511531544-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6a8a7b3a-cfd2-43de-9026-fb9511531544" (UID: "6a8a7b3a-cfd2-43de-9026-fb9511531544"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.170757 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a8a7b3a-cfd2-43de-9026-fb9511531544-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.170775 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2ndr\" (UniqueName: \"kubernetes.io/projected/c6b48882-4a16-4c93-8a4b-3118bea76c46-kube-api-access-r2ndr\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.170786 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6b48882-4a16-4c93-8a4b-3118bea76c46-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.173936 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a8a7b3a-cfd2-43de-9026-fb9511531544-kube-api-access-gxwqg" (OuterVolumeSpecName: "kube-api-access-gxwqg") pod "6a8a7b3a-cfd2-43de-9026-fb9511531544" (UID: "6a8a7b3a-cfd2-43de-9026-fb9511531544"). InnerVolumeSpecName "kube-api-access-gxwqg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.294917 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxwqg\" (UniqueName: \"kubernetes.io/projected/6a8a7b3a-cfd2-43de-9026-fb9511531544-kube-api-access-gxwqg\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.489485 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-24db-account-create-update-nngtm" event={"ID":"c6b48882-4a16-4c93-8a4b-3118bea76c46","Type":"ContainerDied","Data":"3fad983c4a033e8b8736e73b54bb41ceb67e7be652b7f4cbd2a80390a30b8967"} Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.489543 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3fad983c4a033e8b8736e73b54bb41ceb67e7be652b7f4cbd2a80390a30b8967" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.489513 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-24db-account-create-update-nngtm" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.493114 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"6f66535bdcc720ca6331a25502406a022cc11d5deb1d240c85548ae491d10847"} Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.493149 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"f97bae70a659d9379e0b9aa155c195b120fbabe54eb51979918bf8742225ce0c"} Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.493183 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"5545383bd4f003b8eae1107099fa764b18ce35bbb9c642c3cabdac9b136df4a7"} Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.493199 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"8fd64438f7891c841e3cdcfbe940de7d1012268fb51be125aeb4a739c5f7871d"} Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.494613 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-cl46t" event={"ID":"6a8a7b3a-cfd2-43de-9026-fb9511531544","Type":"ContainerDied","Data":"cf6ae5c695153c31ae07d68b1da53eaf8963eba389ef563fd1ab313c93d12df6"} Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.494649 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf6ae5c695153c31ae07d68b1da53eaf8963eba389ef563fd1ab313c93d12df6" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.494654 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-cl46t" Feb 01 07:40:31 crc kubenswrapper[4650]: I0201 07:40:31.877924 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-ls4nm" Feb 01 07:40:32 crc kubenswrapper[4650]: I0201 07:40:32.003872 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nfqfm\" (UniqueName: \"kubernetes.io/projected/94f99333-91c5-405b-bcff-de7c84179d99-kube-api-access-nfqfm\") pod \"94f99333-91c5-405b-bcff-de7c84179d99\" (UID: \"94f99333-91c5-405b-bcff-de7c84179d99\") " Feb 01 07:40:32 crc kubenswrapper[4650]: I0201 07:40:32.004177 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94f99333-91c5-405b-bcff-de7c84179d99-operator-scripts\") pod \"94f99333-91c5-405b-bcff-de7c84179d99\" (UID: \"94f99333-91c5-405b-bcff-de7c84179d99\") " Feb 01 07:40:32 crc kubenswrapper[4650]: I0201 07:40:32.005261 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94f99333-91c5-405b-bcff-de7c84179d99-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "94f99333-91c5-405b-bcff-de7c84179d99" (UID: "94f99333-91c5-405b-bcff-de7c84179d99"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:32 crc kubenswrapper[4650]: I0201 07:40:32.021160 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94f99333-91c5-405b-bcff-de7c84179d99-kube-api-access-nfqfm" (OuterVolumeSpecName: "kube-api-access-nfqfm") pod "94f99333-91c5-405b-bcff-de7c84179d99" (UID: "94f99333-91c5-405b-bcff-de7c84179d99"). InnerVolumeSpecName "kube-api-access-nfqfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:32 crc kubenswrapper[4650]: I0201 07:40:32.106751 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nfqfm\" (UniqueName: \"kubernetes.io/projected/94f99333-91c5-405b-bcff-de7c84179d99-kube-api-access-nfqfm\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:32 crc kubenswrapper[4650]: I0201 07:40:32.106786 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94f99333-91c5-405b-bcff-de7c84179d99-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:32 crc kubenswrapper[4650]: I0201 07:40:32.310821 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:32 crc kubenswrapper[4650]: E0201 07:40:32.311015 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:40:32 crc kubenswrapper[4650]: E0201 07:40:32.311280 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:40:40.311262035 +0000 UTC m=+1039.034360280 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:40:32 crc kubenswrapper[4650]: I0201 07:40:32.502692 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-ls4nm" event={"ID":"94f99333-91c5-405b-bcff-de7c84179d99","Type":"ContainerDied","Data":"2fb65845a02a4a4926153fdd419c6adf99c8173743809e3d0b9e61f74ab7e8a9"} Feb 01 07:40:32 crc kubenswrapper[4650]: I0201 07:40:32.502738 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2fb65845a02a4a4926153fdd419c6adf99c8173743809e3d0b9e61f74ab7e8a9" Feb 01 07:40:32 crc kubenswrapper[4650]: I0201 07:40:32.502751 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-ls4nm" Feb 01 07:40:32 crc kubenswrapper[4650]: I0201 07:40:32.507420 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="5545383bd4f003b8eae1107099fa764b18ce35bbb9c642c3cabdac9b136df4a7" exitCode=1 Feb 01 07:40:32 crc kubenswrapper[4650]: I0201 07:40:32.507447 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"5545383bd4f003b8eae1107099fa764b18ce35bbb9c642c3cabdac9b136df4a7"} Feb 01 07:40:32 crc kubenswrapper[4650]: I0201 07:40:32.954604 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.021276 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-9fxzp"] Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.021722 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-9fxzp" podUID="49d5b7da-4df2-4c94-8dcd-8e3fbf589474" containerName="dnsmasq-dns" containerID="cri-o://bdeb1978c7e8dc15353947af2694cc1bcd536af0da207db8a13e51566ffd771d" gracePeriod=10 Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.478998 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.517114 4650 generic.go:334] "Generic (PLEG): container finished" podID="49d5b7da-4df2-4c94-8dcd-8e3fbf589474" containerID="bdeb1978c7e8dc15353947af2694cc1bcd536af0da207db8a13e51566ffd771d" exitCode=0 Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.517192 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-9fxzp" event={"ID":"49d5b7da-4df2-4c94-8dcd-8e3fbf589474","Type":"ContainerDied","Data":"bdeb1978c7e8dc15353947af2694cc1bcd536af0da207db8a13e51566ffd771d"} Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.517222 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-9fxzp" event={"ID":"49d5b7da-4df2-4c94-8dcd-8e3fbf589474","Type":"ContainerDied","Data":"9116ee116dc168c3d1964bd7a9927cc5cee946c13fe54ddfb63e8c8ba8c9119b"} Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.517242 4650 scope.go:117] "RemoveContainer" containerID="bdeb1978c7e8dc15353947af2694cc1bcd536af0da207db8a13e51566ffd771d" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.517403 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-9fxzp" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.528356 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"092c036764bc43b6c02ff54e9eb3b67f429c327f120c40c9f66cf098fe79dc37"} Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.529065 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"bd291405c365b9ebbc808fe9137c489b397ce43572ed9f0cc0bf7a18a6fedf60"} Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.529086 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"f946571ea708ff219948f657b64f9f86f6b5223f920512ac079be250fe77d434"} Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.529096 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"a2f245de60987498c1e6bb70859aa55f0788c7b7b797afccd6cd432b4a30454a"} Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.561720 4650 scope.go:117] "RemoveContainer" containerID="d96ef25122ab98881ae95891f79172bae77ca7a2f6a696fa87bb769c849fd964" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.590591 4650 scope.go:117] "RemoveContainer" containerID="bdeb1978c7e8dc15353947af2694cc1bcd536af0da207db8a13e51566ffd771d" Feb 01 07:40:33 crc kubenswrapper[4650]: E0201 07:40:33.591071 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdeb1978c7e8dc15353947af2694cc1bcd536af0da207db8a13e51566ffd771d\": container with ID starting with bdeb1978c7e8dc15353947af2694cc1bcd536af0da207db8a13e51566ffd771d not found: ID does not exist" containerID="bdeb1978c7e8dc15353947af2694cc1bcd536af0da207db8a13e51566ffd771d" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.591125 4650 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"bdeb1978c7e8dc15353947af2694cc1bcd536af0da207db8a13e51566ffd771d"} err="failed to get container status \"bdeb1978c7e8dc15353947af2694cc1bcd536af0da207db8a13e51566ffd771d\": rpc error: code = NotFound desc = could not find container \"bdeb1978c7e8dc15353947af2694cc1bcd536af0da207db8a13e51566ffd771d\": container with ID starting with bdeb1978c7e8dc15353947af2694cc1bcd536af0da207db8a13e51566ffd771d not found: ID does not exist" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.591152 4650 scope.go:117] "RemoveContainer" containerID="d96ef25122ab98881ae95891f79172bae77ca7a2f6a696fa87bb769c849fd964" Feb 01 07:40:33 crc kubenswrapper[4650]: E0201 07:40:33.591723 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d96ef25122ab98881ae95891f79172bae77ca7a2f6a696fa87bb769c849fd964\": container with ID starting with d96ef25122ab98881ae95891f79172bae77ca7a2f6a696fa87bb769c849fd964 not found: ID does not exist" containerID="d96ef25122ab98881ae95891f79172bae77ca7a2f6a696fa87bb769c849fd964" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.591754 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d96ef25122ab98881ae95891f79172bae77ca7a2f6a696fa87bb769c849fd964"} err="failed to get container status \"d96ef25122ab98881ae95891f79172bae77ca7a2f6a696fa87bb769c849fd964\": rpc error: code = NotFound desc = could not find container \"d96ef25122ab98881ae95891f79172bae77ca7a2f6a696fa87bb769c849fd964\": container with ID starting with d96ef25122ab98881ae95891f79172bae77ca7a2f6a696fa87bb769c849fd964 not found: ID does not exist" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.636624 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9r7f\" (UniqueName: \"kubernetes.io/projected/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-kube-api-access-p9r7f\") pod \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.636712 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-ovsdbserver-sb\") pod \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.636887 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-ovsdbserver-nb\") pod \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.636929 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-config\") pod \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.637102 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-dns-svc\") pod \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\" (UID: \"49d5b7da-4df2-4c94-8dcd-8e3fbf589474\") " Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.641159 4650 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-kube-api-access-p9r7f" (OuterVolumeSpecName: "kube-api-access-p9r7f") pod "49d5b7da-4df2-4c94-8dcd-8e3fbf589474" (UID: "49d5b7da-4df2-4c94-8dcd-8e3fbf589474"). InnerVolumeSpecName "kube-api-access-p9r7f". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.682479 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "49d5b7da-4df2-4c94-8dcd-8e3fbf589474" (UID: "49d5b7da-4df2-4c94-8dcd-8e3fbf589474"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.684715 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "49d5b7da-4df2-4c94-8dcd-8e3fbf589474" (UID: "49d5b7da-4df2-4c94-8dcd-8e3fbf589474"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.685416 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-config" (OuterVolumeSpecName: "config") pod "49d5b7da-4df2-4c94-8dcd-8e3fbf589474" (UID: "49d5b7da-4df2-4c94-8dcd-8e3fbf589474"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.700432 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "49d5b7da-4df2-4c94-8dcd-8e3fbf589474" (UID: "49d5b7da-4df2-4c94-8dcd-8e3fbf589474"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.739519 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.739554 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.739563 4650 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.739574 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9r7f\" (UniqueName: \"kubernetes.io/projected/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-kube-api-access-p9r7f\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.739586 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49d5b7da-4df2-4c94-8dcd-8e3fbf589474-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.853901 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-9fxzp"] Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.860308 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-9fxzp"] Feb 01 07:40:33 crc kubenswrapper[4650]: I0201 07:40:33.979816 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49d5b7da-4df2-4c94-8dcd-8e3fbf589474" path="/var/lib/kubelet/pods/49d5b7da-4df2-4c94-8dcd-8e3fbf589474/volumes" Feb 01 07:40:34 crc kubenswrapper[4650]: I0201 07:40:34.063426 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-ls4nm"] Feb 01 07:40:34 crc kubenswrapper[4650]: I0201 07:40:34.070136 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-ls4nm"] Feb 01 07:40:35 crc kubenswrapper[4650]: I0201 07:40:35.987075 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94f99333-91c5-405b-bcff-de7c84179d99" path="/var/lib/kubelet/pods/94f99333-91c5-405b-bcff-de7c84179d99/volumes" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.246084 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.254280 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-bkqlt"] Feb 01 07:40:36 crc kubenswrapper[4650]: E0201 07:40:36.254626 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49d5b7da-4df2-4c94-8dcd-8e3fbf589474" containerName="init" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.254646 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="49d5b7da-4df2-4c94-8dcd-8e3fbf589474" containerName="init" Feb 01 07:40:36 crc kubenswrapper[4650]: E0201 07:40:36.254660 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94f99333-91c5-405b-bcff-de7c84179d99" containerName="mariadb-account-create-update" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.254670 4650 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="94f99333-91c5-405b-bcff-de7c84179d99" containerName="mariadb-account-create-update" Feb 01 07:40:36 crc kubenswrapper[4650]: E0201 07:40:36.254697 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a8a7b3a-cfd2-43de-9026-fb9511531544" containerName="mariadb-database-create" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.254706 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a8a7b3a-cfd2-43de-9026-fb9511531544" containerName="mariadb-database-create" Feb 01 07:40:36 crc kubenswrapper[4650]: E0201 07:40:36.254718 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6b48882-4a16-4c93-8a4b-3118bea76c46" containerName="mariadb-account-create-update" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.254727 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6b48882-4a16-4c93-8a4b-3118bea76c46" containerName="mariadb-account-create-update" Feb 01 07:40:36 crc kubenswrapper[4650]: E0201 07:40:36.254751 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49d5b7da-4df2-4c94-8dcd-8e3fbf589474" containerName="dnsmasq-dns" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.254759 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="49d5b7da-4df2-4c94-8dcd-8e3fbf589474" containerName="dnsmasq-dns" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.254973 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="94f99333-91c5-405b-bcff-de7c84179d99" containerName="mariadb-account-create-update" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.254997 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="49d5b7da-4df2-4c94-8dcd-8e3fbf589474" containerName="dnsmasq-dns" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.255016 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6b48882-4a16-4c93-8a4b-3118bea76c46" containerName="mariadb-account-create-update" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.255049 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a8a7b3a-cfd2-43de-9026-fb9511531544" containerName="mariadb-database-create" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.255630 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.261246 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.262555 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-bc68g" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.268758 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-bkqlt"] Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.390860 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nzxw\" (UniqueName: \"kubernetes.io/projected/b01aeb4f-ec32-444e-b714-6ab54c79bad3-kube-api-access-8nzxw\") pod \"glance-db-sync-bkqlt\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.390897 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-db-sync-config-data\") pod \"glance-db-sync-bkqlt\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.390944 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-combined-ca-bundle\") pod \"glance-db-sync-bkqlt\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.390991 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-config-data\") pod \"glance-db-sync-bkqlt\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.492584 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-db-sync-config-data\") pod \"glance-db-sync-bkqlt\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.492645 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-combined-ca-bundle\") pod \"glance-db-sync-bkqlt\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.492709 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-config-data\") pod \"glance-db-sync-bkqlt\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.492794 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nzxw\" (UniqueName: \"kubernetes.io/projected/b01aeb4f-ec32-444e-b714-6ab54c79bad3-kube-api-access-8nzxw\") pod 
\"glance-db-sync-bkqlt\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.497188 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-db-sync-config-data\") pod \"glance-db-sync-bkqlt\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.497481 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-config-data\") pod \"glance-db-sync-bkqlt\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.499108 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-combined-ca-bundle\") pod \"glance-db-sync-bkqlt\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.516112 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nzxw\" (UniqueName: \"kubernetes.io/projected/b01aeb4f-ec32-444e-b714-6ab54c79bad3-kube-api-access-8nzxw\") pod \"glance-db-sync-bkqlt\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.558089 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="26231752c14d48d39042e16e665d58e2637dd86c80f2d4c1cd067bc74266ffb3" exitCode=1 Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.558143 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"ecea3693964bf2c35381291db84f3db21297e3cbfe0efc1dfc2704704f0f03e2"} Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.558184 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"516cebc379bc43867b51f5f0f970b91996b37ab72d0cec548ef5da0df5f9a8a2"} Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.558194 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"26231752c14d48d39042e16e665d58e2637dd86c80f2d4c1cd067bc74266ffb3"} Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.558738 4650 scope.go:117] "RemoveContainer" containerID="e7e68451b8cf7d29dae2c3584f0e748b91b72e7e9b5f0fb5f0376bbc02b8c6c8" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.558831 4650 scope.go:117] "RemoveContainer" containerID="5545383bd4f003b8eae1107099fa764b18ce35bbb9c642c3cabdac9b136df4a7" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.558938 4650 scope.go:117] "RemoveContainer" containerID="26231752c14d48d39042e16e665d58e2637dd86c80f2d4c1cd067bc74266ffb3" Feb 01 07:40:36 crc kubenswrapper[4650]: I0201 07:40:36.626355 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-bkqlt" Feb 01 07:40:37 crc kubenswrapper[4650]: I0201 07:40:37.147019 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-bkqlt"] Feb 01 07:40:37 crc kubenswrapper[4650]: W0201 07:40:37.151098 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb01aeb4f_ec32_444e_b714_6ab54c79bad3.slice/crio-05886c72c66341b5d156df96f39552d2c9d7b05c9b0a6f034a028087d740e7e5 WatchSource:0}: Error finding container 05886c72c66341b5d156df96f39552d2c9d7b05c9b0a6f034a028087d740e7e5: Status 404 returned error can't find the container with id 05886c72c66341b5d156df96f39552d2c9d7b05c9b0a6f034a028087d740e7e5 Feb 01 07:40:37 crc kubenswrapper[4650]: I0201 07:40:37.574804 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bkqlt" event={"ID":"b01aeb4f-ec32-444e-b714-6ab54c79bad3","Type":"ContainerStarted","Data":"05886c72c66341b5d156df96f39552d2c9d7b05c9b0a6f034a028087d740e7e5"} Feb 01 07:40:37 crc kubenswrapper[4650]: I0201 07:40:37.583615 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="8d4501dcd879dbdb80eff4ccd4902baa166072d663a61fa761da4cbfe17277c5" exitCode=1 Feb 01 07:40:37 crc kubenswrapper[4650]: I0201 07:40:37.583663 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="24df1c6ff930fe7d1a57bef4c75502d4fc67053987b67722936734770b770632" exitCode=1 Feb 01 07:40:37 crc kubenswrapper[4650]: I0201 07:40:37.583682 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"a182da084e0365cc7e5a856ff0c2338464be4ba51902f1d1f8ccdfc3ff83344b"} Feb 01 07:40:37 crc kubenswrapper[4650]: I0201 07:40:37.583703 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"8d4501dcd879dbdb80eff4ccd4902baa166072d663a61fa761da4cbfe17277c5"} Feb 01 07:40:37 crc kubenswrapper[4650]: I0201 07:40:37.583716 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"24df1c6ff930fe7d1a57bef4c75502d4fc67053987b67722936734770b770632"} Feb 01 07:40:37 crc kubenswrapper[4650]: I0201 07:40:37.583732 4650 scope.go:117] "RemoveContainer" containerID="5545383bd4f003b8eae1107099fa764b18ce35bbb9c642c3cabdac9b136df4a7" Feb 01 07:40:37 crc kubenswrapper[4650]: I0201 07:40:37.584423 4650 scope.go:117] "RemoveContainer" containerID="24df1c6ff930fe7d1a57bef4c75502d4fc67053987b67722936734770b770632" Feb 01 07:40:37 crc kubenswrapper[4650]: I0201 07:40:37.584484 4650 scope.go:117] "RemoveContainer" containerID="8d4501dcd879dbdb80eff4ccd4902baa166072d663a61fa761da4cbfe17277c5" Feb 01 07:40:37 crc kubenswrapper[4650]: E0201 07:40:37.584885 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-replicator 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:40:37 crc kubenswrapper[4650]: I0201 07:40:37.633573 4650 scope.go:117] "RemoveContainer" containerID="e7e68451b8cf7d29dae2c3584f0e748b91b72e7e9b5f0fb5f0376bbc02b8c6c8" Feb 01 07:40:37 crc kubenswrapper[4650]: E0201 07:40:37.837283 4650 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78a7b8d6_a107_4698_b85d_77d415755428.slice/crio-a182da084e0365cc7e5a856ff0c2338464be4ba51902f1d1f8ccdfc3ff83344b.scope\": RecentStats: unable to find data in memory cache]" Feb 01 07:40:38 crc kubenswrapper[4650]: I0201 07:40:38.600114 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="a182da084e0365cc7e5a856ff0c2338464be4ba51902f1d1f8ccdfc3ff83344b" exitCode=1 Feb 01 07:40:38 crc kubenswrapper[4650]: I0201 07:40:38.600245 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"a182da084e0365cc7e5a856ff0c2338464be4ba51902f1d1f8ccdfc3ff83344b"} Feb 01 07:40:38 crc kubenswrapper[4650]: I0201 07:40:38.600460 4650 scope.go:117] "RemoveContainer" containerID="26231752c14d48d39042e16e665d58e2637dd86c80f2d4c1cd067bc74266ffb3" Feb 01 07:40:38 crc kubenswrapper[4650]: I0201 07:40:38.602141 4650 scope.go:117] "RemoveContainer" containerID="24df1c6ff930fe7d1a57bef4c75502d4fc67053987b67722936734770b770632" Feb 01 07:40:38 crc kubenswrapper[4650]: I0201 07:40:38.602219 4650 scope.go:117] "RemoveContainer" containerID="8d4501dcd879dbdb80eff4ccd4902baa166072d663a61fa761da4cbfe17277c5" Feb 01 07:40:38 crc kubenswrapper[4650]: I0201 07:40:38.602320 4650 scope.go:117] "RemoveContainer" containerID="a182da084e0365cc7e5a856ff0c2338464be4ba51902f1d1f8ccdfc3ff83344b" Feb 01 07:40:38 crc kubenswrapper[4650]: E0201 07:40:38.602946 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.086710 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-fpjpx"] Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.093051 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-fpjpx" Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.095179 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.099978 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-fpjpx"] Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.148614 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97cad671-0078-4ca0-a66c-53b9d93adb4a-operator-scripts\") pod \"root-account-create-update-fpjpx\" (UID: \"97cad671-0078-4ca0-a66c-53b9d93adb4a\") " pod="openstack/root-account-create-update-fpjpx" Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.148973 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9wxl\" (UniqueName: \"kubernetes.io/projected/97cad671-0078-4ca0-a66c-53b9d93adb4a-kube-api-access-c9wxl\") pod \"root-account-create-update-fpjpx\" (UID: \"97cad671-0078-4ca0-a66c-53b9d93adb4a\") " pod="openstack/root-account-create-update-fpjpx" Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.250834 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9wxl\" (UniqueName: \"kubernetes.io/projected/97cad671-0078-4ca0-a66c-53b9d93adb4a-kube-api-access-c9wxl\") pod \"root-account-create-update-fpjpx\" (UID: \"97cad671-0078-4ca0-a66c-53b9d93adb4a\") " pod="openstack/root-account-create-update-fpjpx" Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.250982 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97cad671-0078-4ca0-a66c-53b9d93adb4a-operator-scripts\") pod \"root-account-create-update-fpjpx\" (UID: \"97cad671-0078-4ca0-a66c-53b9d93adb4a\") " pod="openstack/root-account-create-update-fpjpx" Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.251956 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97cad671-0078-4ca0-a66c-53b9d93adb4a-operator-scripts\") pod \"root-account-create-update-fpjpx\" (UID: \"97cad671-0078-4ca0-a66c-53b9d93adb4a\") " pod="openstack/root-account-create-update-fpjpx" Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.270056 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9wxl\" (UniqueName: \"kubernetes.io/projected/97cad671-0078-4ca0-a66c-53b9d93adb4a-kube-api-access-c9wxl\") pod \"root-account-create-update-fpjpx\" (UID: \"97cad671-0078-4ca0-a66c-53b9d93adb4a\") " pod="openstack/root-account-create-update-fpjpx" Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.414610 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-fpjpx" Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.609520 4650 generic.go:334] "Generic (PLEG): container finished" podID="722b2919-c0d6-4596-82cc-5ae2b5951263" containerID="32437ee89767575c4ab78143becb2f56aa0392cfd1b4b00d44cd1c32a22178c5" exitCode=0 Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.609582 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"722b2919-c0d6-4596-82cc-5ae2b5951263","Type":"ContainerDied","Data":"32437ee89767575c4ab78143becb2f56aa0392cfd1b4b00d44cd1c32a22178c5"} Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.617282 4650 scope.go:117] "RemoveContainer" containerID="24df1c6ff930fe7d1a57bef4c75502d4fc67053987b67722936734770b770632" Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.617646 4650 scope.go:117] "RemoveContainer" containerID="8d4501dcd879dbdb80eff4ccd4902baa166072d663a61fa761da4cbfe17277c5" Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.617734 4650 scope.go:117] "RemoveContainer" containerID="a182da084e0365cc7e5a856ff0c2338464be4ba51902f1d1f8ccdfc3ff83344b" Feb 01 07:40:39 crc kubenswrapper[4650]: E0201 07:40:39.618192 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:40:39 crc kubenswrapper[4650]: I0201 07:40:39.844813 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-fpjpx"] Feb 01 07:40:40 crc kubenswrapper[4650]: I0201 07:40:40.369147 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:40 crc kubenswrapper[4650]: E0201 07:40:40.369419 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:40:40 crc kubenswrapper[4650]: E0201 07:40:40.369494 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:40:56.369474986 +0000 UTC m=+1055.092573231 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:40:40 crc kubenswrapper[4650]: I0201 07:40:40.626162 4650 generic.go:334] "Generic (PLEG): container finished" podID="97cad671-0078-4ca0-a66c-53b9d93adb4a" containerID="da46aa12b1173720990b7676be01c57190bdd6a845a02438538595c9c31b4a09" exitCode=0 Feb 01 07:40:40 crc kubenswrapper[4650]: I0201 07:40:40.626228 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-fpjpx" event={"ID":"97cad671-0078-4ca0-a66c-53b9d93adb4a","Type":"ContainerDied","Data":"da46aa12b1173720990b7676be01c57190bdd6a845a02438538595c9c31b4a09"} Feb 01 07:40:40 crc kubenswrapper[4650]: I0201 07:40:40.626290 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-fpjpx" event={"ID":"97cad671-0078-4ca0-a66c-53b9d93adb4a","Type":"ContainerStarted","Data":"9325118b8ff89ab8453f5066df449fefd4e2ce9ffd3f6b20af53e7c96795d099"} Feb 01 07:40:40 crc kubenswrapper[4650]: I0201 07:40:40.630770 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"722b2919-c0d6-4596-82cc-5ae2b5951263","Type":"ContainerStarted","Data":"c1fd7dd8fead7918b16a0b548fc9f38f24d123aa226f8c39a7a6e8ac8642d182"} Feb 01 07:40:40 crc kubenswrapper[4650]: I0201 07:40:40.631043 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Feb 01 07:40:40 crc kubenswrapper[4650]: I0201 07:40:40.632636 4650 generic.go:334] "Generic (PLEG): container finished" podID="9c378d90-fab5-4d68-9aba-892645206b97" containerID="ff627a2b40a0ccff03027386f28859d19e57b613c37de01159985101a0578d2b" exitCode=0 Feb 01 07:40:40 crc kubenswrapper[4650]: I0201 07:40:40.632686 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9c378d90-fab5-4d68-9aba-892645206b97","Type":"ContainerDied","Data":"ff627a2b40a0ccff03027386f28859d19e57b613c37de01159985101a0578d2b"} Feb 01 07:40:40 crc kubenswrapper[4650]: I0201 07:40:40.689213 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.630764173 podStartE2EDuration="1m4.689188104s" podCreationTimestamp="2026-02-01 07:39:36 +0000 UTC" firstStartedPulling="2026-02-01 07:39:38.031668637 +0000 UTC m=+976.754766882" lastFinishedPulling="2026-02-01 07:40:04.090092568 +0000 UTC m=+1002.813190813" observedRunningTime="2026-02-01 07:40:40.684145321 +0000 UTC m=+1039.407243566" watchObservedRunningTime="2026-02-01 07:40:40.689188104 +0000 UTC m=+1039.412286369" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.425924 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.431415 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-df4vg" podUID="0eea2c6a-8650-4a55-aab9-0b27b8e829b4" containerName="ovn-controller" probeResult="failure" output=< Feb 01 07:40:41 crc kubenswrapper[4650]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Feb 01 07:40:41 crc kubenswrapper[4650]: > Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.443190 4650 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-9xcg8" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.645904 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9c378d90-fab5-4d68-9aba-892645206b97","Type":"ContainerStarted","Data":"7c017792f5efea70064078050816337a8c6884c63f47b9616f3fa1ca02fde8e0"} Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.657071 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-df4vg-config-26bl2"] Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.658088 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.661959 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.680383 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-df4vg-config-26bl2"] Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.695405 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-log-ovn\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.695481 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fa74ae54-d018-46cd-9821-0472050a483d-additional-scripts\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.695513 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r54mm\" (UniqueName: \"kubernetes.io/projected/fa74ae54-d018-46cd-9821-0472050a483d-kube-api-access-r54mm\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.695540 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-run\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.695563 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-run-ovn\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.695621 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fa74ae54-d018-46cd-9821-0472050a483d-scripts\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " 
pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.706775 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.715092517 podStartE2EDuration="1m5.7067418s" podCreationTimestamp="2026-02-01 07:39:36 +0000 UTC" firstStartedPulling="2026-02-01 07:39:38.526210846 +0000 UTC m=+977.249309091" lastFinishedPulling="2026-02-01 07:40:05.517860129 +0000 UTC m=+1004.240958374" observedRunningTime="2026-02-01 07:40:41.696093099 +0000 UTC m=+1040.419191364" watchObservedRunningTime="2026-02-01 07:40:41.7067418 +0000 UTC m=+1040.429840055" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.797757 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-run\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.797825 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-run-ovn\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.797925 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fa74ae54-d018-46cd-9821-0472050a483d-scripts\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.798045 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-log-ovn\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.798139 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fa74ae54-d018-46cd-9821-0472050a483d-additional-scripts\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.798212 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r54mm\" (UniqueName: \"kubernetes.io/projected/fa74ae54-d018-46cd-9821-0472050a483d-kube-api-access-r54mm\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.799970 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-run\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.800103 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-log-ovn\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.800173 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fa74ae54-d018-46cd-9821-0472050a483d-additional-scripts\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.800611 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-run-ovn\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.810042 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fa74ae54-d018-46cd-9821-0472050a483d-scripts\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.844961 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r54mm\" (UniqueName: \"kubernetes.io/projected/fa74ae54-d018-46cd-9821-0472050a483d-kube-api-access-r54mm\") pod \"ovn-controller-df4vg-config-26bl2\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:41 crc kubenswrapper[4650]: I0201 07:40:41.984862 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:42 crc kubenswrapper[4650]: I0201 07:40:42.135773 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-fpjpx" Feb 01 07:40:42 crc kubenswrapper[4650]: I0201 07:40:42.205056 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97cad671-0078-4ca0-a66c-53b9d93adb4a-operator-scripts\") pod \"97cad671-0078-4ca0-a66c-53b9d93adb4a\" (UID: \"97cad671-0078-4ca0-a66c-53b9d93adb4a\") " Feb 01 07:40:42 crc kubenswrapper[4650]: I0201 07:40:42.205223 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9wxl\" (UniqueName: \"kubernetes.io/projected/97cad671-0078-4ca0-a66c-53b9d93adb4a-kube-api-access-c9wxl\") pod \"97cad671-0078-4ca0-a66c-53b9d93adb4a\" (UID: \"97cad671-0078-4ca0-a66c-53b9d93adb4a\") " Feb 01 07:40:42 crc kubenswrapper[4650]: I0201 07:40:42.208364 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97cad671-0078-4ca0-a66c-53b9d93adb4a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "97cad671-0078-4ca0-a66c-53b9d93adb4a" (UID: "97cad671-0078-4ca0-a66c-53b9d93adb4a"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:42 crc kubenswrapper[4650]: I0201 07:40:42.210020 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97cad671-0078-4ca0-a66c-53b9d93adb4a-kube-api-access-c9wxl" (OuterVolumeSpecName: "kube-api-access-c9wxl") pod "97cad671-0078-4ca0-a66c-53b9d93adb4a" (UID: "97cad671-0078-4ca0-a66c-53b9d93adb4a"). InnerVolumeSpecName "kube-api-access-c9wxl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:42 crc kubenswrapper[4650]: I0201 07:40:42.307040 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97cad671-0078-4ca0-a66c-53b9d93adb4a-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:42 crc kubenswrapper[4650]: I0201 07:40:42.307070 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9wxl\" (UniqueName: \"kubernetes.io/projected/97cad671-0078-4ca0-a66c-53b9d93adb4a-kube-api-access-c9wxl\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:42 crc kubenswrapper[4650]: I0201 07:40:42.474582 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-df4vg-config-26bl2"] Feb 01 07:40:42 crc kubenswrapper[4650]: W0201 07:40:42.476392 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa74ae54_d018_46cd_9821_0472050a483d.slice/crio-968b6982cef41e81642cdaaad5dcfc1f8b8b7403439b74c3ae93827d3b79cbb6 WatchSource:0}: Error finding container 968b6982cef41e81642cdaaad5dcfc1f8b8b7403439b74c3ae93827d3b79cbb6: Status 404 returned error can't find the container with id 968b6982cef41e81642cdaaad5dcfc1f8b8b7403439b74c3ae93827d3b79cbb6 Feb 01 07:40:42 crc kubenswrapper[4650]: I0201 07:40:42.653808 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-df4vg-config-26bl2" event={"ID":"fa74ae54-d018-46cd-9821-0472050a483d","Type":"ContainerStarted","Data":"968b6982cef41e81642cdaaad5dcfc1f8b8b7403439b74c3ae93827d3b79cbb6"} Feb 01 07:40:42 crc kubenswrapper[4650]: I0201 07:40:42.656145 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-fpjpx" event={"ID":"97cad671-0078-4ca0-a66c-53b9d93adb4a","Type":"ContainerDied","Data":"9325118b8ff89ab8453f5066df449fefd4e2ce9ffd3f6b20af53e7c96795d099"} Feb 01 07:40:42 crc kubenswrapper[4650]: I0201 07:40:42.656192 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9325118b8ff89ab8453f5066df449fefd4e2ce9ffd3f6b20af53e7c96795d099" Feb 01 07:40:42 crc kubenswrapper[4650]: I0201 07:40:42.656245 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-fpjpx" Feb 01 07:40:43 crc kubenswrapper[4650]: I0201 07:40:43.663864 4650 generic.go:334] "Generic (PLEG): container finished" podID="fa74ae54-d018-46cd-9821-0472050a483d" containerID="0a8ef4a2aa7b93cd6fdaea9e16240a2274799d2c30db543919ffee7f3af2d11c" exitCode=0 Feb 01 07:40:43 crc kubenswrapper[4650]: I0201 07:40:43.663993 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-df4vg-config-26bl2" event={"ID":"fa74ae54-d018-46cd-9821-0472050a483d","Type":"ContainerDied","Data":"0a8ef4a2aa7b93cd6fdaea9e16240a2274799d2c30db543919ffee7f3af2d11c"} Feb 01 07:40:46 crc kubenswrapper[4650]: I0201 07:40:46.442619 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-df4vg" Feb 01 07:40:47 crc kubenswrapper[4650]: I0201 07:40:47.976352 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.259326 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.356395 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-log-ovn\") pod \"fa74ae54-d018-46cd-9821-0472050a483d\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.356484 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fa74ae54-d018-46cd-9821-0472050a483d-scripts\") pod \"fa74ae54-d018-46cd-9821-0472050a483d\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.356507 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r54mm\" (UniqueName: \"kubernetes.io/projected/fa74ae54-d018-46cd-9821-0472050a483d-kube-api-access-r54mm\") pod \"fa74ae54-d018-46cd-9821-0472050a483d\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.356623 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fa74ae54-d018-46cd-9821-0472050a483d-additional-scripts\") pod \"fa74ae54-d018-46cd-9821-0472050a483d\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.356647 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-run-ovn\") pod \"fa74ae54-d018-46cd-9821-0472050a483d\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.356683 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-run\") pod \"fa74ae54-d018-46cd-9821-0472050a483d\" (UID: \"fa74ae54-d018-46cd-9821-0472050a483d\") " Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.357205 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-run" (OuterVolumeSpecName: "var-run") pod 
"fa74ae54-d018-46cd-9821-0472050a483d" (UID: "fa74ae54-d018-46cd-9821-0472050a483d"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.357258 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "fa74ae54-d018-46cd-9821-0472050a483d" (UID: "fa74ae54-d018-46cd-9821-0472050a483d"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.358057 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "fa74ae54-d018-46cd-9821-0472050a483d" (UID: "fa74ae54-d018-46cd-9821-0472050a483d"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.358433 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa74ae54-d018-46cd-9821-0472050a483d-scripts" (OuterVolumeSpecName: "scripts") pod "fa74ae54-d018-46cd-9821-0472050a483d" (UID: "fa74ae54-d018-46cd-9821-0472050a483d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.359097 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa74ae54-d018-46cd-9821-0472050a483d-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "fa74ae54-d018-46cd-9821-0472050a483d" (UID: "fa74ae54-d018-46cd-9821-0472050a483d"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.366208 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa74ae54-d018-46cd-9821-0472050a483d-kube-api-access-r54mm" (OuterVolumeSpecName: "kube-api-access-r54mm") pod "fa74ae54-d018-46cd-9821-0472050a483d" (UID: "fa74ae54-d018-46cd-9821-0472050a483d"). InnerVolumeSpecName "kube-api-access-r54mm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.458013 4650 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fa74ae54-d018-46cd-9821-0472050a483d-additional-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.458373 4650 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.458388 4650 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-run\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.458399 4650 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fa74ae54-d018-46cd-9821-0472050a483d-var-log-ovn\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.458414 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fa74ae54-d018-46cd-9821-0472050a483d-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.458425 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r54mm\" (UniqueName: \"kubernetes.io/projected/fa74ae54-d018-46cd-9821-0472050a483d-kube-api-access-r54mm\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.722647 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-df4vg-config-26bl2" event={"ID":"fa74ae54-d018-46cd-9821-0472050a483d","Type":"ContainerDied","Data":"968b6982cef41e81642cdaaad5dcfc1f8b8b7403439b74c3ae93827d3b79cbb6"} Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.722686 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="968b6982cef41e81642cdaaad5dcfc1f8b8b7403439b74c3ae93827d3b79cbb6" Feb 01 07:40:51 crc kubenswrapper[4650]: I0201 07:40:51.722735 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-df4vg-config-26bl2" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.368468 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-df4vg-config-26bl2"] Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.374662 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-df4vg-config-26bl2"] Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.429466 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-df4vg-config-pnbld"] Feb 01 07:40:52 crc kubenswrapper[4650]: E0201 07:40:52.429853 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97cad671-0078-4ca0-a66c-53b9d93adb4a" containerName="mariadb-account-create-update" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.429876 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="97cad671-0078-4ca0-a66c-53b9d93adb4a" containerName="mariadb-account-create-update" Feb 01 07:40:52 crc kubenswrapper[4650]: E0201 07:40:52.429900 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa74ae54-d018-46cd-9821-0472050a483d" containerName="ovn-config" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.429908 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa74ae54-d018-46cd-9821-0472050a483d" containerName="ovn-config" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.430143 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa74ae54-d018-46cd-9821-0472050a483d" containerName="ovn-config" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.430169 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="97cad671-0078-4ca0-a66c-53b9d93adb4a" containerName="mariadb-account-create-update" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.430753 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.436233 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.445779 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-df4vg-config-pnbld"] Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.473918 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-additional-scripts\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.474054 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-run\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.474074 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-log-ovn\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.474104 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-scripts\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.474133 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-755v2\" (UniqueName: \"kubernetes.io/projected/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-kube-api-access-755v2\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.474222 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-run-ovn\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.575343 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-scripts\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.575394 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-755v2\" (UniqueName: 
\"kubernetes.io/projected/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-kube-api-access-755v2\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.575425 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-run-ovn\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.575466 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-additional-scripts\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.575566 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-run\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.575584 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-log-ovn\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.575819 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-log-ovn\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.575809 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-run-ovn\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.575864 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-run\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.576624 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-additional-scripts\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.579837 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-scripts\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.611111 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-755v2\" (UniqueName: \"kubernetes.io/projected/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-kube-api-access-755v2\") pod \"ovn-controller-df4vg-config-pnbld\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.731899 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bkqlt" event={"ID":"b01aeb4f-ec32-444e-b714-6ab54c79bad3","Type":"ContainerStarted","Data":"a4c3419430b19047f133d5cd68eeaeef7602934e5639e9f02510aaec85580f3f"} Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.746286 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:52 crc kubenswrapper[4650]: I0201 07:40:52.753361 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-bkqlt" podStartSLOduration=2.6447918230000003 podStartE2EDuration="16.753340638s" podCreationTimestamp="2026-02-01 07:40:36 +0000 UTC" firstStartedPulling="2026-02-01 07:40:37.153982481 +0000 UTC m=+1035.877080726" lastFinishedPulling="2026-02-01 07:40:51.262531306 +0000 UTC m=+1049.985629541" observedRunningTime="2026-02-01 07:40:52.745009638 +0000 UTC m=+1051.468107893" watchObservedRunningTime="2026-02-01 07:40:52.753340638 +0000 UTC m=+1051.476438893" Feb 01 07:40:53 crc kubenswrapper[4650]: I0201 07:40:52.967099 4650 scope.go:117] "RemoveContainer" containerID="24df1c6ff930fe7d1a57bef4c75502d4fc67053987b67722936734770b770632" Feb 01 07:40:53 crc kubenswrapper[4650]: I0201 07:40:52.967548 4650 scope.go:117] "RemoveContainer" containerID="8d4501dcd879dbdb80eff4ccd4902baa166072d663a61fa761da4cbfe17277c5" Feb 01 07:40:53 crc kubenswrapper[4650]: I0201 07:40:52.967666 4650 scope.go:117] "RemoveContainer" containerID="a182da084e0365cc7e5a856ff0c2338464be4ba51902f1d1f8ccdfc3ff83344b" Feb 01 07:40:53 crc kubenswrapper[4650]: I0201 07:40:53.206955 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-df4vg-config-pnbld"] Feb 01 07:40:53 crc kubenswrapper[4650]: I0201 07:40:53.747621 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="d2f715a393c524210c739be45a6809ad759184af4ae9bdac9460496004b7b00d" exitCode=1 Feb 01 07:40:53 crc kubenswrapper[4650]: I0201 07:40:53.747706 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"b4fe5ae45d159834a9c667093e751e227a564d39ec95f64cc51d8b99cc229ac8"} Feb 01 07:40:53 crc kubenswrapper[4650]: I0201 07:40:53.748092 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"0fb0fa378a10d2f58c054ac5c1759a8c5f3b3807d985594164a305e6defd887a"} Feb 01 07:40:53 crc kubenswrapper[4650]: I0201 07:40:53.748113 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"d2f715a393c524210c739be45a6809ad759184af4ae9bdac9460496004b7b00d"} Feb 01 07:40:53 crc kubenswrapper[4650]: I0201 07:40:53.748138 4650 scope.go:117] "RemoveContainer" containerID="24df1c6ff930fe7d1a57bef4c75502d4fc67053987b67722936734770b770632" Feb 01 07:40:53 crc kubenswrapper[4650]: I0201 07:40:53.749036 4650 scope.go:117] "RemoveContainer" containerID="d2f715a393c524210c739be45a6809ad759184af4ae9bdac9460496004b7b00d" Feb 01 07:40:53 crc kubenswrapper[4650]: E0201 07:40:53.749595 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:40:53 crc kubenswrapper[4650]: I0201 07:40:53.764312 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-df4vg-config-pnbld" event={"ID":"e046666b-c83a-4d99-b27f-bb7edfd3e8e8","Type":"ContainerStarted","Data":"06616ed95f463e4ccf0b03c06cb4cd95c9607d8b526fe0b21e3e25ae58c2864a"} Feb 01 07:40:53 crc kubenswrapper[4650]: I0201 07:40:53.764359 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-df4vg-config-pnbld" event={"ID":"e046666b-c83a-4d99-b27f-bb7edfd3e8e8","Type":"ContainerStarted","Data":"481d4a4761e9c9b7a4f51fa472bc9fc02808952ee1d90667b86d3b2c75b16849"} Feb 01 07:40:53 crc kubenswrapper[4650]: I0201 07:40:53.972948 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa74ae54-d018-46cd-9821-0472050a483d" path="/var/lib/kubelet/pods/fa74ae54-d018-46cd-9821-0472050a483d/volumes" Feb 01 07:40:54 crc kubenswrapper[4650]: I0201 07:40:54.776133 4650 generic.go:334] "Generic (PLEG): container finished" podID="e046666b-c83a-4d99-b27f-bb7edfd3e8e8" containerID="06616ed95f463e4ccf0b03c06cb4cd95c9607d8b526fe0b21e3e25ae58c2864a" exitCode=0 Feb 01 07:40:54 crc kubenswrapper[4650]: I0201 07:40:54.776235 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-df4vg-config-pnbld" event={"ID":"e046666b-c83a-4d99-b27f-bb7edfd3e8e8","Type":"ContainerDied","Data":"06616ed95f463e4ccf0b03c06cb4cd95c9607d8b526fe0b21e3e25ae58c2864a"} Feb 01 07:40:54 crc kubenswrapper[4650]: I0201 07:40:54.786012 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="b4fe5ae45d159834a9c667093e751e227a564d39ec95f64cc51d8b99cc229ac8" exitCode=1 Feb 01 07:40:54 crc kubenswrapper[4650]: I0201 07:40:54.786076 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="0fb0fa378a10d2f58c054ac5c1759a8c5f3b3807d985594164a305e6defd887a" exitCode=1 Feb 01 07:40:54 crc kubenswrapper[4650]: I0201 07:40:54.786103 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"b4fe5ae45d159834a9c667093e751e227a564d39ec95f64cc51d8b99cc229ac8"} Feb 01 07:40:54 crc kubenswrapper[4650]: I0201 07:40:54.786171 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"0fb0fa378a10d2f58c054ac5c1759a8c5f3b3807d985594164a305e6defd887a"} Feb 01 07:40:54 crc kubenswrapper[4650]: I0201 
07:40:54.786208 4650 scope.go:117] "RemoveContainer" containerID="a182da084e0365cc7e5a856ff0c2338464be4ba51902f1d1f8ccdfc3ff83344b" Feb 01 07:40:54 crc kubenswrapper[4650]: I0201 07:40:54.787057 4650 scope.go:117] "RemoveContainer" containerID="d2f715a393c524210c739be45a6809ad759184af4ae9bdac9460496004b7b00d" Feb 01 07:40:54 crc kubenswrapper[4650]: I0201 07:40:54.787134 4650 scope.go:117] "RemoveContainer" containerID="0fb0fa378a10d2f58c054ac5c1759a8c5f3b3807d985594164a305e6defd887a" Feb 01 07:40:54 crc kubenswrapper[4650]: I0201 07:40:54.787281 4650 scope.go:117] "RemoveContainer" containerID="b4fe5ae45d159834a9c667093e751e227a564d39ec95f64cc51d8b99cc229ac8" Feb 01 07:40:54 crc kubenswrapper[4650]: E0201 07:40:54.787753 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:40:54 crc kubenswrapper[4650]: I0201 07:40:54.840467 4650 scope.go:117] "RemoveContainer" containerID="8d4501dcd879dbdb80eff4ccd4902baa166072d663a61fa761da4cbfe17277c5" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.246630 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.347544 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-additional-scripts\") pod \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.348217 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-755v2\" (UniqueName: \"kubernetes.io/projected/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-kube-api-access-755v2\") pod \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.348251 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-run\") pod \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.348301 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-scripts\") pod \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.348329 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "e046666b-c83a-4d99-b27f-bb7edfd3e8e8" (UID: "e046666b-c83a-4d99-b27f-bb7edfd3e8e8"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.348332 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-run" (OuterVolumeSpecName: "var-run") pod "e046666b-c83a-4d99-b27f-bb7edfd3e8e8" (UID: "e046666b-c83a-4d99-b27f-bb7edfd3e8e8"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.348365 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "e046666b-c83a-4d99-b27f-bb7edfd3e8e8" (UID: "e046666b-c83a-4d99-b27f-bb7edfd3e8e8"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.348340 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-run-ovn\") pod \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.348446 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-log-ovn\") pod \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\" (UID: \"e046666b-c83a-4d99-b27f-bb7edfd3e8e8\") " Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.348569 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "e046666b-c83a-4d99-b27f-bb7edfd3e8e8" (UID: "e046666b-c83a-4d99-b27f-bb7edfd3e8e8"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.349170 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-scripts" (OuterVolumeSpecName: "scripts") pod "e046666b-c83a-4d99-b27f-bb7edfd3e8e8" (UID: "e046666b-c83a-4d99-b27f-bb7edfd3e8e8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.349310 4650 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-additional-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.349422 4650 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-run\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.349554 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.349567 4650 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.349580 4650 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-var-log-ovn\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.354492 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-kube-api-access-755v2" (OuterVolumeSpecName: "kube-api-access-755v2") pod "e046666b-c83a-4d99-b27f-bb7edfd3e8e8" (UID: "e046666b-c83a-4d99-b27f-bb7edfd3e8e8"). InnerVolumeSpecName "kube-api-access-755v2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.452084 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.452609 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-755v2\" (UniqueName: \"kubernetes.io/projected/e046666b-c83a-4d99-b27f-bb7edfd3e8e8-kube-api-access-755v2\") on node \"crc\" DevicePath \"\"" Feb 01 07:40:56 crc kubenswrapper[4650]: E0201 07:40:56.452724 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:40:56 crc kubenswrapper[4650]: E0201 07:40:56.452798 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:41:28.452770085 +0000 UTC m=+1087.175868350 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.813623 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-df4vg-config-pnbld" event={"ID":"e046666b-c83a-4d99-b27f-bb7edfd3e8e8","Type":"ContainerDied","Data":"481d4a4761e9c9b7a4f51fa472bc9fc02808952ee1d90667b86d3b2c75b16849"} Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.813893 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="481d4a4761e9c9b7a4f51fa472bc9fc02808952ee1d90667b86d3b2c75b16849" Feb 01 07:40:56 crc kubenswrapper[4650]: I0201 07:40:56.813925 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-df4vg-config-pnbld" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.365161 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-df4vg-config-pnbld"] Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.372069 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-df4vg-config-pnbld"] Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.475855 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-df4vg-config-hscsn"] Feb 01 07:40:57 crc kubenswrapper[4650]: E0201 07:40:57.476164 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e046666b-c83a-4d99-b27f-bb7edfd3e8e8" containerName="ovn-config" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.476176 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="e046666b-c83a-4d99-b27f-bb7edfd3e8e8" containerName="ovn-config" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.476340 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="e046666b-c83a-4d99-b27f-bb7edfd3e8e8" containerName="ovn-config" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.476809 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.479960 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.501737 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-df4vg-config-hscsn"] Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.569846 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c3f0e263-8271-4b59-8064-4cf05e94e611-additional-scripts\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.569884 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c3f0e263-8271-4b59-8064-4cf05e94e611-scripts\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.569936 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-run\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.569953 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2sfz\" (UniqueName: \"kubernetes.io/projected/c3f0e263-8271-4b59-8064-4cf05e94e611-kube-api-access-l2sfz\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.569991 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-log-ovn\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.570019 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-run-ovn\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.571185 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.671899 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-run\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.672137 4650 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2sfz\" (UniqueName: \"kubernetes.io/projected/c3f0e263-8271-4b59-8064-4cf05e94e611-kube-api-access-l2sfz\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.672242 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-run\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.672639 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-log-ovn\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.672767 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-log-ovn\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.672987 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-run-ovn\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.673104 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-run-ovn\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.673378 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c3f0e263-8271-4b59-8064-4cf05e94e611-additional-scripts\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.673418 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c3f0e263-8271-4b59-8064-4cf05e94e611-scripts\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.674085 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c3f0e263-8271-4b59-8064-4cf05e94e611-additional-scripts\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.675256 4650 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c3f0e263-8271-4b59-8064-4cf05e94e611-scripts\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.705161 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2sfz\" (UniqueName: \"kubernetes.io/projected/c3f0e263-8271-4b59-8064-4cf05e94e611-kube-api-access-l2sfz\") pod \"ovn-controller-df4vg-config-hscsn\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.792010 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.976852 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e046666b-c83a-4d99-b27f-bb7edfd3e8e8" path="/var/lib/kubelet/pods/e046666b-c83a-4d99-b27f-bb7edfd3e8e8/volumes" Feb 01 07:40:57 crc kubenswrapper[4650]: I0201 07:40:57.979179 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.009367 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-6b89b"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.015877 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6b89b" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.042776 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6b89b"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.080733 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5sv5\" (UniqueName: \"kubernetes.io/projected/b8de9a37-3519-4804-b3e8-197bea437afe-kube-api-access-c5sv5\") pod \"cinder-db-create-6b89b\" (UID: \"b8de9a37-3519-4804-b3e8-197bea437afe\") " pod="openstack/cinder-db-create-6b89b" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.080961 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b8de9a37-3519-4804-b3e8-197bea437afe-operator-scripts\") pod \"cinder-db-create-6b89b\" (UID: \"b8de9a37-3519-4804-b3e8-197bea437afe\") " pod="openstack/cinder-db-create-6b89b" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.117749 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-zc2xb"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.119197 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-zc2xb" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.145902 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-zc2xb"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.183474 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5sv5\" (UniqueName: \"kubernetes.io/projected/b8de9a37-3519-4804-b3e8-197bea437afe-kube-api-access-c5sv5\") pod \"cinder-db-create-6b89b\" (UID: \"b8de9a37-3519-4804-b3e8-197bea437afe\") " pod="openstack/cinder-db-create-6b89b" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.183512 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b8de9a37-3519-4804-b3e8-197bea437afe-operator-scripts\") pod \"cinder-db-create-6b89b\" (UID: \"b8de9a37-3519-4804-b3e8-197bea437afe\") " pod="openstack/cinder-db-create-6b89b" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.185914 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b8de9a37-3519-4804-b3e8-197bea437afe-operator-scripts\") pod \"cinder-db-create-6b89b\" (UID: \"b8de9a37-3519-4804-b3e8-197bea437afe\") " pod="openstack/cinder-db-create-6b89b" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.232627 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-a6d9-account-create-update-x2cpx"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.253568 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-a6d9-account-create-update-x2cpx"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.253799 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-a6d9-account-create-update-x2cpx" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.266204 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.286753 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5sv5\" (UniqueName: \"kubernetes.io/projected/b8de9a37-3519-4804-b3e8-197bea437afe-kube-api-access-c5sv5\") pod \"cinder-db-create-6b89b\" (UID: \"b8de9a37-3519-4804-b3e8-197bea437afe\") " pod="openstack/cinder-db-create-6b89b" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.287552 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92592241-70ef-42cc-b3b7-3a85bcdba8a8-operator-scripts\") pod \"barbican-db-create-zc2xb\" (UID: \"92592241-70ef-42cc-b3b7-3a85bcdba8a8\") " pod="openstack/barbican-db-create-zc2xb" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.287604 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svprc\" (UniqueName: \"kubernetes.io/projected/92592241-70ef-42cc-b3b7-3a85bcdba8a8-kube-api-access-svprc\") pod \"barbican-db-create-zc2xb\" (UID: \"92592241-70ef-42cc-b3b7-3a85bcdba8a8\") " pod="openstack/barbican-db-create-zc2xb" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.340378 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-6b89b" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.392274 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92592241-70ef-42cc-b3b7-3a85bcdba8a8-operator-scripts\") pod \"barbican-db-create-zc2xb\" (UID: \"92592241-70ef-42cc-b3b7-3a85bcdba8a8\") " pod="openstack/barbican-db-create-zc2xb" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.394124 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f87nj\" (UniqueName: \"kubernetes.io/projected/44937f93-aef1-4223-ad7e-5d05832d2f4b-kube-api-access-f87nj\") pod \"barbican-a6d9-account-create-update-x2cpx\" (UID: \"44937f93-aef1-4223-ad7e-5d05832d2f4b\") " pod="openstack/barbican-a6d9-account-create-update-x2cpx" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.394238 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svprc\" (UniqueName: \"kubernetes.io/projected/92592241-70ef-42cc-b3b7-3a85bcdba8a8-kube-api-access-svprc\") pod \"barbican-db-create-zc2xb\" (UID: \"92592241-70ef-42cc-b3b7-3a85bcdba8a8\") " pod="openstack/barbican-db-create-zc2xb" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.394984 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92592241-70ef-42cc-b3b7-3a85bcdba8a8-operator-scripts\") pod \"barbican-db-create-zc2xb\" (UID: \"92592241-70ef-42cc-b3b7-3a85bcdba8a8\") " pod="openstack/barbican-db-create-zc2xb" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.395183 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/44937f93-aef1-4223-ad7e-5d05832d2f4b-operator-scripts\") pod \"barbican-a6d9-account-create-update-x2cpx\" (UID: \"44937f93-aef1-4223-ad7e-5d05832d2f4b\") " pod="openstack/barbican-a6d9-account-create-update-x2cpx" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.430464 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-3173-account-create-update-l5lzr"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.431837 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-3173-account-create-update-l5lzr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.435254 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.448431 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3173-account-create-update-l5lzr"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.464443 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svprc\" (UniqueName: \"kubernetes.io/projected/92592241-70ef-42cc-b3b7-3a85bcdba8a8-kube-api-access-svprc\") pod \"barbican-db-create-zc2xb\" (UID: \"92592241-70ef-42cc-b3b7-3a85bcdba8a8\") " pod="openstack/barbican-db-create-zc2xb" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.506478 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f87nj\" (UniqueName: \"kubernetes.io/projected/44937f93-aef1-4223-ad7e-5d05832d2f4b-kube-api-access-f87nj\") pod \"barbican-a6d9-account-create-update-x2cpx\" (UID: \"44937f93-aef1-4223-ad7e-5d05832d2f4b\") " pod="openstack/barbican-a6d9-account-create-update-x2cpx" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.506533 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/44937f93-aef1-4223-ad7e-5d05832d2f4b-operator-scripts\") pod \"barbican-a6d9-account-create-update-x2cpx\" (UID: \"44937f93-aef1-4223-ad7e-5d05832d2f4b\") " pod="openstack/barbican-a6d9-account-create-update-x2cpx" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.515584 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/44937f93-aef1-4223-ad7e-5d05832d2f4b-operator-scripts\") pod \"barbican-a6d9-account-create-update-x2cpx\" (UID: \"44937f93-aef1-4223-ad7e-5d05832d2f4b\") " pod="openstack/barbican-a6d9-account-create-update-x2cpx" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.525879 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-fllvl"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.526954 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-fllvl" Feb 01 07:40:58 crc kubenswrapper[4650]: W0201 07:40:58.547380 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc3f0e263_8271_4b59_8064_4cf05e94e611.slice/crio-5421a36a148ae25fc689a692a507e05f93736610c6863be27ccb1267d5281225 WatchSource:0}: Error finding container 5421a36a148ae25fc689a692a507e05f93736610c6863be27ccb1267d5281225: Status 404 returned error can't find the container with id 5421a36a148ae25fc689a692a507e05f93736610c6863be27ccb1267d5281225 Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.563005 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f87nj\" (UniqueName: \"kubernetes.io/projected/44937f93-aef1-4223-ad7e-5d05832d2f4b-kube-api-access-f87nj\") pod \"barbican-a6d9-account-create-update-x2cpx\" (UID: \"44937f93-aef1-4223-ad7e-5d05832d2f4b\") " pod="openstack/barbican-a6d9-account-create-update-x2cpx" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.558130 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-df4vg-config-hscsn"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.574508 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-a6d9-account-create-update-x2cpx" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.577355 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-fllvl"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.598674 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-sz8cr"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.599772 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-sz8cr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.603956 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.604158 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8zg69" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.604309 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.604456 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.608653 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndjdw\" (UniqueName: \"kubernetes.io/projected/60f5edb2-23c7-4720-a7f0-8a635e39cd03-kube-api-access-ndjdw\") pod \"neutron-db-create-fllvl\" (UID: \"60f5edb2-23c7-4720-a7f0-8a635e39cd03\") " pod="openstack/neutron-db-create-fllvl" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.608700 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60f5edb2-23c7-4720-a7f0-8a635e39cd03-operator-scripts\") pod \"neutron-db-create-fllvl\" (UID: \"60f5edb2-23c7-4720-a7f0-8a635e39cd03\") " pod="openstack/neutron-db-create-fllvl" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.608749 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8zth\" (UniqueName: \"kubernetes.io/projected/aee86276-99b2-44ef-ae5d-6072f34ffe58-kube-api-access-h8zth\") pod \"cinder-3173-account-create-update-l5lzr\" (UID: \"aee86276-99b2-44ef-ae5d-6072f34ffe58\") " pod="openstack/cinder-3173-account-create-update-l5lzr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.608778 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aee86276-99b2-44ef-ae5d-6072f34ffe58-operator-scripts\") pod \"cinder-3173-account-create-update-l5lzr\" (UID: \"aee86276-99b2-44ef-ae5d-6072f34ffe58\") " pod="openstack/cinder-3173-account-create-update-l5lzr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.609263 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-sz8cr"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.713657 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7lcz\" (UniqueName: \"kubernetes.io/projected/3127db22-5d48-4d3e-bd3e-806a06e6cad8-kube-api-access-m7lcz\") pod \"keystone-db-sync-sz8cr\" (UID: \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\") " pod="openstack/keystone-db-sync-sz8cr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.713925 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndjdw\" (UniqueName: \"kubernetes.io/projected/60f5edb2-23c7-4720-a7f0-8a635e39cd03-kube-api-access-ndjdw\") pod \"neutron-db-create-fllvl\" (UID: \"60f5edb2-23c7-4720-a7f0-8a635e39cd03\") " pod="openstack/neutron-db-create-fllvl" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.713950 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3127db22-5d48-4d3e-bd3e-806a06e6cad8-combined-ca-bundle\") pod \"keystone-db-sync-sz8cr\" (UID: \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\") " pod="openstack/keystone-db-sync-sz8cr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.713979 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60f5edb2-23c7-4720-a7f0-8a635e39cd03-operator-scripts\") pod \"neutron-db-create-fllvl\" (UID: \"60f5edb2-23c7-4720-a7f0-8a635e39cd03\") " pod="openstack/neutron-db-create-fllvl" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.714018 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8zth\" (UniqueName: \"kubernetes.io/projected/aee86276-99b2-44ef-ae5d-6072f34ffe58-kube-api-access-h8zth\") pod \"cinder-3173-account-create-update-l5lzr\" (UID: \"aee86276-99b2-44ef-ae5d-6072f34ffe58\") " pod="openstack/cinder-3173-account-create-update-l5lzr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.714058 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aee86276-99b2-44ef-ae5d-6072f34ffe58-operator-scripts\") pod \"cinder-3173-account-create-update-l5lzr\" (UID: \"aee86276-99b2-44ef-ae5d-6072f34ffe58\") " pod="openstack/cinder-3173-account-create-update-l5lzr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.714087 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3127db22-5d48-4d3e-bd3e-806a06e6cad8-config-data\") pod \"keystone-db-sync-sz8cr\" (UID: \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\") " pod="openstack/keystone-db-sync-sz8cr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.714791 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60f5edb2-23c7-4720-a7f0-8a635e39cd03-operator-scripts\") pod \"neutron-db-create-fllvl\" (UID: \"60f5edb2-23c7-4720-a7f0-8a635e39cd03\") " pod="openstack/neutron-db-create-fllvl" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.715120 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aee86276-99b2-44ef-ae5d-6072f34ffe58-operator-scripts\") pod \"cinder-3173-account-create-update-l5lzr\" (UID: \"aee86276-99b2-44ef-ae5d-6072f34ffe58\") " pod="openstack/cinder-3173-account-create-update-l5lzr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.718018 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6caf-account-create-update-lwpqh"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.718902 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6caf-account-create-update-lwpqh" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.726687 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.752983 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6caf-account-create-update-lwpqh"] Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.759552 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-zc2xb" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.794719 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8zth\" (UniqueName: \"kubernetes.io/projected/aee86276-99b2-44ef-ae5d-6072f34ffe58-kube-api-access-h8zth\") pod \"cinder-3173-account-create-update-l5lzr\" (UID: \"aee86276-99b2-44ef-ae5d-6072f34ffe58\") " pod="openstack/cinder-3173-account-create-update-l5lzr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.796671 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndjdw\" (UniqueName: \"kubernetes.io/projected/60f5edb2-23c7-4720-a7f0-8a635e39cd03-kube-api-access-ndjdw\") pod \"neutron-db-create-fllvl\" (UID: \"60f5edb2-23c7-4720-a7f0-8a635e39cd03\") " pod="openstack/neutron-db-create-fllvl" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.816792 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bj2q5\" (UniqueName: \"kubernetes.io/projected/8368ba08-f07f-4082-bfd0-e72e1a38d7a8-kube-api-access-bj2q5\") pod \"neutron-6caf-account-create-update-lwpqh\" (UID: \"8368ba08-f07f-4082-bfd0-e72e1a38d7a8\") " pod="openstack/neutron-6caf-account-create-update-lwpqh" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.816826 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8368ba08-f07f-4082-bfd0-e72e1a38d7a8-operator-scripts\") pod \"neutron-6caf-account-create-update-lwpqh\" (UID: \"8368ba08-f07f-4082-bfd0-e72e1a38d7a8\") " pod="openstack/neutron-6caf-account-create-update-lwpqh" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.816870 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3127db22-5d48-4d3e-bd3e-806a06e6cad8-config-data\") pod \"keystone-db-sync-sz8cr\" (UID: \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\") " pod="openstack/keystone-db-sync-sz8cr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.816932 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7lcz\" (UniqueName: \"kubernetes.io/projected/3127db22-5d48-4d3e-bd3e-806a06e6cad8-kube-api-access-m7lcz\") pod \"keystone-db-sync-sz8cr\" (UID: \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\") " pod="openstack/keystone-db-sync-sz8cr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.816971 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3127db22-5d48-4d3e-bd3e-806a06e6cad8-combined-ca-bundle\") pod \"keystone-db-sync-sz8cr\" (UID: \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\") " pod="openstack/keystone-db-sync-sz8cr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.848745 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-df4vg-config-hscsn" event={"ID":"c3f0e263-8271-4b59-8064-4cf05e94e611","Type":"ContainerStarted","Data":"5421a36a148ae25fc689a692a507e05f93736610c6863be27ccb1267d5281225"} Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.855272 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3127db22-5d48-4d3e-bd3e-806a06e6cad8-combined-ca-bundle\") pod \"keystone-db-sync-sz8cr\" (UID: \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\") " 
pod="openstack/keystone-db-sync-sz8cr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.861748 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3127db22-5d48-4d3e-bd3e-806a06e6cad8-config-data\") pod \"keystone-db-sync-sz8cr\" (UID: \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\") " pod="openstack/keystone-db-sync-sz8cr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.866931 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fllvl" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.870532 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7lcz\" (UniqueName: \"kubernetes.io/projected/3127db22-5d48-4d3e-bd3e-806a06e6cad8-kube-api-access-m7lcz\") pod \"keystone-db-sync-sz8cr\" (UID: \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\") " pod="openstack/keystone-db-sync-sz8cr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.918671 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8368ba08-f07f-4082-bfd0-e72e1a38d7a8-operator-scripts\") pod \"neutron-6caf-account-create-update-lwpqh\" (UID: \"8368ba08-f07f-4082-bfd0-e72e1a38d7a8\") " pod="openstack/neutron-6caf-account-create-update-lwpqh" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.918713 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bj2q5\" (UniqueName: \"kubernetes.io/projected/8368ba08-f07f-4082-bfd0-e72e1a38d7a8-kube-api-access-bj2q5\") pod \"neutron-6caf-account-create-update-lwpqh\" (UID: \"8368ba08-f07f-4082-bfd0-e72e1a38d7a8\") " pod="openstack/neutron-6caf-account-create-update-lwpqh" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.919581 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8368ba08-f07f-4082-bfd0-e72e1a38d7a8-operator-scripts\") pod \"neutron-6caf-account-create-update-lwpqh\" (UID: \"8368ba08-f07f-4082-bfd0-e72e1a38d7a8\") " pod="openstack/neutron-6caf-account-create-update-lwpqh" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.927429 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-sz8cr" Feb 01 07:40:58 crc kubenswrapper[4650]: I0201 07:40:58.991940 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bj2q5\" (UniqueName: \"kubernetes.io/projected/8368ba08-f07f-4082-bfd0-e72e1a38d7a8-kube-api-access-bj2q5\") pod \"neutron-6caf-account-create-update-lwpqh\" (UID: \"8368ba08-f07f-4082-bfd0-e72e1a38d7a8\") " pod="openstack/neutron-6caf-account-create-update-lwpqh" Feb 01 07:40:59 crc kubenswrapper[4650]: I0201 07:40:59.057984 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3173-account-create-update-l5lzr" Feb 01 07:40:59 crc kubenswrapper[4650]: I0201 07:40:59.063273 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6caf-account-create-update-lwpqh" Feb 01 07:40:59 crc kubenswrapper[4650]: I0201 07:40:59.262699 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6b89b"] Feb 01 07:40:59 crc kubenswrapper[4650]: I0201 07:40:59.383201 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-zc2xb"] Feb 01 07:40:59 crc kubenswrapper[4650]: I0201 07:40:59.549877 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-a6d9-account-create-update-x2cpx"] Feb 01 07:40:59 crc kubenswrapper[4650]: W0201 07:40:59.577946 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44937f93_aef1_4223_ad7e_5d05832d2f4b.slice/crio-34bfdc78b88b0cb9640d1dd5f0430b10a79e6df8d9d7492c42891a295dd13e30 WatchSource:0}: Error finding container 34bfdc78b88b0cb9640d1dd5f0430b10a79e6df8d9d7492c42891a295dd13e30: Status 404 returned error can't find the container with id 34bfdc78b88b0cb9640d1dd5f0430b10a79e6df8d9d7492c42891a295dd13e30 Feb 01 07:40:59 crc kubenswrapper[4650]: I0201 07:40:59.860333 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-df4vg-config-hscsn" event={"ID":"c3f0e263-8271-4b59-8064-4cf05e94e611","Type":"ContainerStarted","Data":"3fe1ea57719f5cc7d5fa12a1945a5b41907c3b01e2d2c0b7ccb85779246cf245"} Feb 01 07:40:59 crc kubenswrapper[4650]: I0201 07:40:59.869691 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a6d9-account-create-update-x2cpx" event={"ID":"44937f93-aef1-4223-ad7e-5d05832d2f4b","Type":"ContainerStarted","Data":"34bfdc78b88b0cb9640d1dd5f0430b10a79e6df8d9d7492c42891a295dd13e30"} Feb 01 07:40:59 crc kubenswrapper[4650]: I0201 07:40:59.887986 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-zc2xb" event={"ID":"92592241-70ef-42cc-b3b7-3a85bcdba8a8","Type":"ContainerStarted","Data":"b5eb363e96674975163c0fdcf1abb1291e2df1a43bbc523300a5ee0145efa2d7"} Feb 01 07:40:59 crc kubenswrapper[4650]: I0201 07:40:59.888043 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-zc2xb" event={"ID":"92592241-70ef-42cc-b3b7-3a85bcdba8a8","Type":"ContainerStarted","Data":"c627b821623872652cd370db1c81f69b6fd9246cffa7acb6f659b15cd757da45"} Feb 01 07:40:59 crc kubenswrapper[4650]: I0201 07:40:59.888320 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-df4vg-config-hscsn" podStartSLOduration=2.888309885 podStartE2EDuration="2.888309885s" podCreationTimestamp="2026-02-01 07:40:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:40:59.884857844 +0000 UTC m=+1058.607956089" watchObservedRunningTime="2026-02-01 07:40:59.888309885 +0000 UTC m=+1058.611408130" Feb 01 07:40:59 crc kubenswrapper[4650]: I0201 07:40:59.896171 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6b89b" event={"ID":"b8de9a37-3519-4804-b3e8-197bea437afe","Type":"ContainerStarted","Data":"39fafc57e0b41135c1e17de2462ff04275d6e157c6e85f6d4402daa9d39ed7c3"} Feb 01 07:40:59 crc kubenswrapper[4650]: I0201 07:40:59.910386 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-zc2xb" podStartSLOduration=1.9103720260000001 podStartE2EDuration="1.910372026s" 
podCreationTimestamp="2026-02-01 07:40:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:40:59.907433039 +0000 UTC m=+1058.630531284" watchObservedRunningTime="2026-02-01 07:40:59.910372026 +0000 UTC m=+1058.633470271" Feb 01 07:40:59 crc kubenswrapper[4650]: I0201 07:40:59.933825 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-6b89b" podStartSLOduration=2.933803944 podStartE2EDuration="2.933803944s" podCreationTimestamp="2026-02-01 07:40:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:40:59.929928402 +0000 UTC m=+1058.653026667" watchObservedRunningTime="2026-02-01 07:40:59.933803944 +0000 UTC m=+1058.656902189" Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.031198 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6caf-account-create-update-lwpqh"] Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.039705 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-sz8cr"] Feb 01 07:41:00 crc kubenswrapper[4650]: W0201 07:41:00.063512 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8368ba08_f07f_4082_bfd0_e72e1a38d7a8.slice/crio-7cc05834e44fa03f4e62056bff82fa3b7411d02f0a41639f5fe42bb225408d9c WatchSource:0}: Error finding container 7cc05834e44fa03f4e62056bff82fa3b7411d02f0a41639f5fe42bb225408d9c: Status 404 returned error can't find the container with id 7cc05834e44fa03f4e62056bff82fa3b7411d02f0a41639f5fe42bb225408d9c Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.229577 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3173-account-create-update-l5lzr"] Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.244623 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-fllvl"] Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.907877 4650 generic.go:334] "Generic (PLEG): container finished" podID="c3f0e263-8271-4b59-8064-4cf05e94e611" containerID="3fe1ea57719f5cc7d5fa12a1945a5b41907c3b01e2d2c0b7ccb85779246cf245" exitCode=0 Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.908154 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-df4vg-config-hscsn" event={"ID":"c3f0e263-8271-4b59-8064-4cf05e94e611","Type":"ContainerDied","Data":"3fe1ea57719f5cc7d5fa12a1945a5b41907c3b01e2d2c0b7ccb85779246cf245"} Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.911385 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-sz8cr" event={"ID":"3127db22-5d48-4d3e-bd3e-806a06e6cad8","Type":"ContainerStarted","Data":"39222268f3e481813c0a94185bc97fa0a31d820a98a537bbd8a1d7457bca71c7"} Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.914963 4650 generic.go:334] "Generic (PLEG): container finished" podID="44937f93-aef1-4223-ad7e-5d05832d2f4b" containerID="a8d34655beb163077dd0a62ab6ed81fe4ff5c3fbc4ab53e6e25ef443e61cacbf" exitCode=0 Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.915155 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a6d9-account-create-update-x2cpx" 
event={"ID":"44937f93-aef1-4223-ad7e-5d05832d2f4b","Type":"ContainerDied","Data":"a8d34655beb163077dd0a62ab6ed81fe4ff5c3fbc4ab53e6e25ef443e61cacbf"} Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.916797 4650 generic.go:334] "Generic (PLEG): container finished" podID="aee86276-99b2-44ef-ae5d-6072f34ffe58" containerID="32d81a88d968689fba06ddcdc747ee142ebd543892b4554577feb395a000e4db" exitCode=0 Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.916899 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3173-account-create-update-l5lzr" event={"ID":"aee86276-99b2-44ef-ae5d-6072f34ffe58","Type":"ContainerDied","Data":"32d81a88d968689fba06ddcdc747ee142ebd543892b4554577feb395a000e4db"} Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.916982 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3173-account-create-update-l5lzr" event={"ID":"aee86276-99b2-44ef-ae5d-6072f34ffe58","Type":"ContainerStarted","Data":"184a12dd55673361b8742b1c2dbb0dc4afb0787a91bbf7951e9211f411feda3c"} Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.922710 4650 generic.go:334] "Generic (PLEG): container finished" podID="92592241-70ef-42cc-b3b7-3a85bcdba8a8" containerID="b5eb363e96674975163c0fdcf1abb1291e2df1a43bbc523300a5ee0145efa2d7" exitCode=0 Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.922873 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-zc2xb" event={"ID":"92592241-70ef-42cc-b3b7-3a85bcdba8a8","Type":"ContainerDied","Data":"b5eb363e96674975163c0fdcf1abb1291e2df1a43bbc523300a5ee0145efa2d7"} Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.924902 4650 generic.go:334] "Generic (PLEG): container finished" podID="b8de9a37-3519-4804-b3e8-197bea437afe" containerID="d325c2535cb35a0256074fa0ec2c38aceb9baae973c61da8d6d5fe1b1c585c06" exitCode=0 Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.925004 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6b89b" event={"ID":"b8de9a37-3519-4804-b3e8-197bea437afe","Type":"ContainerDied","Data":"d325c2535cb35a0256074fa0ec2c38aceb9baae973c61da8d6d5fe1b1c585c06"} Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.927158 4650 generic.go:334] "Generic (PLEG): container finished" podID="8368ba08-f07f-4082-bfd0-e72e1a38d7a8" containerID="bf03f0e1b53e1196ecb8395cf75dd74193c0e458b3322b2002b77202a701682f" exitCode=0 Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.927311 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6caf-account-create-update-lwpqh" event={"ID":"8368ba08-f07f-4082-bfd0-e72e1a38d7a8","Type":"ContainerDied","Data":"bf03f0e1b53e1196ecb8395cf75dd74193c0e458b3322b2002b77202a701682f"} Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.927467 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6caf-account-create-update-lwpqh" event={"ID":"8368ba08-f07f-4082-bfd0-e72e1a38d7a8","Type":"ContainerStarted","Data":"7cc05834e44fa03f4e62056bff82fa3b7411d02f0a41639f5fe42bb225408d9c"} Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.929886 4650 generic.go:334] "Generic (PLEG): container finished" podID="60f5edb2-23c7-4720-a7f0-8a635e39cd03" containerID="c6c329464cf6866eaac072a3bbd926192fefe4f51643c556b4c28a270c2ef9a2" exitCode=0 Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.929934 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fllvl" 
event={"ID":"60f5edb2-23c7-4720-a7f0-8a635e39cd03","Type":"ContainerDied","Data":"c6c329464cf6866eaac072a3bbd926192fefe4f51643c556b4c28a270c2ef9a2"} Feb 01 07:41:00 crc kubenswrapper[4650]: I0201 07:41:00.929961 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fllvl" event={"ID":"60f5edb2-23c7-4720-a7f0-8a635e39cd03","Type":"ContainerStarted","Data":"65f94d23a7b8424a81245c68ae48b06b6faad12d5e0c176b1bafc144fd518f5f"} Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.561335 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6caf-account-create-update-lwpqh" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.705973 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bj2q5\" (UniqueName: \"kubernetes.io/projected/8368ba08-f07f-4082-bfd0-e72e1a38d7a8-kube-api-access-bj2q5\") pod \"8368ba08-f07f-4082-bfd0-e72e1a38d7a8\" (UID: \"8368ba08-f07f-4082-bfd0-e72e1a38d7a8\") " Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.706167 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8368ba08-f07f-4082-bfd0-e72e1a38d7a8-operator-scripts\") pod \"8368ba08-f07f-4082-bfd0-e72e1a38d7a8\" (UID: \"8368ba08-f07f-4082-bfd0-e72e1a38d7a8\") " Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.707073 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8368ba08-f07f-4082-bfd0-e72e1a38d7a8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8368ba08-f07f-4082-bfd0-e72e1a38d7a8" (UID: "8368ba08-f07f-4082-bfd0-e72e1a38d7a8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.712226 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8368ba08-f07f-4082-bfd0-e72e1a38d7a8-kube-api-access-bj2q5" (OuterVolumeSpecName: "kube-api-access-bj2q5") pod "8368ba08-f07f-4082-bfd0-e72e1a38d7a8" (UID: "8368ba08-f07f-4082-bfd0-e72e1a38d7a8"). InnerVolumeSpecName "kube-api-access-bj2q5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.772311 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-a6d9-account-create-update-x2cpx" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.773302 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-zc2xb" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.808404 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bj2q5\" (UniqueName: \"kubernetes.io/projected/8368ba08-f07f-4082-bfd0-e72e1a38d7a8-kube-api-access-bj2q5\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.808432 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8368ba08-f07f-4082-bfd0-e72e1a38d7a8-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.809697 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.821348 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6b89b" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.913451 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-run\") pod \"c3f0e263-8271-4b59-8064-4cf05e94e611\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.913527 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-run-ovn\") pod \"c3f0e263-8271-4b59-8064-4cf05e94e611\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.913689 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c3f0e263-8271-4b59-8064-4cf05e94e611-additional-scripts\") pod \"c3f0e263-8271-4b59-8064-4cf05e94e611\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.913758 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92592241-70ef-42cc-b3b7-3a85bcdba8a8-operator-scripts\") pod \"92592241-70ef-42cc-b3b7-3a85bcdba8a8\" (UID: \"92592241-70ef-42cc-b3b7-3a85bcdba8a8\") " Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.913808 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-log-ovn\") pod \"c3f0e263-8271-4b59-8064-4cf05e94e611\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.913845 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f87nj\" (UniqueName: \"kubernetes.io/projected/44937f93-aef1-4223-ad7e-5d05832d2f4b-kube-api-access-f87nj\") pod \"44937f93-aef1-4223-ad7e-5d05832d2f4b\" (UID: \"44937f93-aef1-4223-ad7e-5d05832d2f4b\") " Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.913900 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svprc\" (UniqueName: \"kubernetes.io/projected/92592241-70ef-42cc-b3b7-3a85bcdba8a8-kube-api-access-svprc\") pod \"92592241-70ef-42cc-b3b7-3a85bcdba8a8\" (UID: \"92592241-70ef-42cc-b3b7-3a85bcdba8a8\") " Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.913936 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/44937f93-aef1-4223-ad7e-5d05832d2f4b-operator-scripts\") pod \"44937f93-aef1-4223-ad7e-5d05832d2f4b\" (UID: \"44937f93-aef1-4223-ad7e-5d05832d2f4b\") " Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.913988 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2sfz\" (UniqueName: \"kubernetes.io/projected/c3f0e263-8271-4b59-8064-4cf05e94e611-kube-api-access-l2sfz\") pod \"c3f0e263-8271-4b59-8064-4cf05e94e611\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.914015 4650 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c3f0e263-8271-4b59-8064-4cf05e94e611-scripts\") pod \"c3f0e263-8271-4b59-8064-4cf05e94e611\" (UID: \"c3f0e263-8271-4b59-8064-4cf05e94e611\") " Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.915900 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3f0e263-8271-4b59-8064-4cf05e94e611-scripts" (OuterVolumeSpecName: "scripts") pod "c3f0e263-8271-4b59-8064-4cf05e94e611" (UID: "c3f0e263-8271-4b59-8064-4cf05e94e611"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.915941 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-run" (OuterVolumeSpecName: "var-run") pod "c3f0e263-8271-4b59-8064-4cf05e94e611" (UID: "c3f0e263-8271-4b59-8064-4cf05e94e611"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.915982 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "c3f0e263-8271-4b59-8064-4cf05e94e611" (UID: "c3f0e263-8271-4b59-8064-4cf05e94e611"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.916718 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3f0e263-8271-4b59-8064-4cf05e94e611-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "c3f0e263-8271-4b59-8064-4cf05e94e611" (UID: "c3f0e263-8271-4b59-8064-4cf05e94e611"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.917616 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92592241-70ef-42cc-b3b7-3a85bcdba8a8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "92592241-70ef-42cc-b3b7-3a85bcdba8a8" (UID: "92592241-70ef-42cc-b3b7-3a85bcdba8a8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.917676 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "c3f0e263-8271-4b59-8064-4cf05e94e611" (UID: "c3f0e263-8271-4b59-8064-4cf05e94e611"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.921345 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44937f93-aef1-4223-ad7e-5d05832d2f4b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "44937f93-aef1-4223-ad7e-5d05832d2f4b" (UID: "44937f93-aef1-4223-ad7e-5d05832d2f4b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.937378 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3f0e263-8271-4b59-8064-4cf05e94e611-kube-api-access-l2sfz" (OuterVolumeSpecName: "kube-api-access-l2sfz") pod "c3f0e263-8271-4b59-8064-4cf05e94e611" (UID: "c3f0e263-8271-4b59-8064-4cf05e94e611"). InnerVolumeSpecName "kube-api-access-l2sfz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.943374 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92592241-70ef-42cc-b3b7-3a85bcdba8a8-kube-api-access-svprc" (OuterVolumeSpecName: "kube-api-access-svprc") pod "92592241-70ef-42cc-b3b7-3a85bcdba8a8" (UID: "92592241-70ef-42cc-b3b7-3a85bcdba8a8"). InnerVolumeSpecName "kube-api-access-svprc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.967222 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44937f93-aef1-4223-ad7e-5d05832d2f4b-kube-api-access-f87nj" (OuterVolumeSpecName: "kube-api-access-f87nj") pod "44937f93-aef1-4223-ad7e-5d05832d2f4b" (UID: "44937f93-aef1-4223-ad7e-5d05832d2f4b"). InnerVolumeSpecName "kube-api-access-f87nj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.975397 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3173-account-create-update-l5lzr" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.979286 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3173-account-create-update-l5lzr" event={"ID":"aee86276-99b2-44ef-ae5d-6072f34ffe58","Type":"ContainerDied","Data":"184a12dd55673361b8742b1c2dbb0dc4afb0787a91bbf7951e9211f411feda3c"} Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.979311 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="184a12dd55673361b8742b1c2dbb0dc4afb0787a91bbf7951e9211f411feda3c" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.982516 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-zc2xb" event={"ID":"92592241-70ef-42cc-b3b7-3a85bcdba8a8","Type":"ContainerDied","Data":"c627b821623872652cd370db1c81f69b6fd9246cffa7acb6f659b15cd757da45"} Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.982541 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c627b821623872652cd370db1c81f69b6fd9246cffa7acb6f659b15cd757da45" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.982605 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-zc2xb" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.994361 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-fllvl" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.995197 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6b89b" event={"ID":"b8de9a37-3519-4804-b3e8-197bea437afe","Type":"ContainerDied","Data":"39fafc57e0b41135c1e17de2462ff04275d6e157c6e85f6d4402daa9d39ed7c3"} Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.995239 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39fafc57e0b41135c1e17de2462ff04275d6e157c6e85f6d4402daa9d39ed7c3" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.995306 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6b89b" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.996825 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6caf-account-create-update-lwpqh" event={"ID":"8368ba08-f07f-4082-bfd0-e72e1a38d7a8","Type":"ContainerDied","Data":"7cc05834e44fa03f4e62056bff82fa3b7411d02f0a41639f5fe42bb225408d9c"} Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.996847 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7cc05834e44fa03f4e62056bff82fa3b7411d02f0a41639f5fe42bb225408d9c" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.996885 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6caf-account-create-update-lwpqh" Feb 01 07:41:02 crc kubenswrapper[4650]: I0201 07:41:02.998170 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-df4vg-config-hscsn"] Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.000449 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fllvl" event={"ID":"60f5edb2-23c7-4720-a7f0-8a635e39cd03","Type":"ContainerDied","Data":"65f94d23a7b8424a81245c68ae48b06b6faad12d5e0c176b1bafc144fd518f5f"} Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.000483 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fllvl" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.000496 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65f94d23a7b8424a81245c68ae48b06b6faad12d5e0c176b1bafc144fd518f5f" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.002956 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-df4vg-config-hscsn" event={"ID":"c3f0e263-8271-4b59-8064-4cf05e94e611","Type":"ContainerDied","Data":"5421a36a148ae25fc689a692a507e05f93736610c6863be27ccb1267d5281225"} Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.002985 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5421a36a148ae25fc689a692a507e05f93736610c6863be27ccb1267d5281225" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.003144 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-df4vg-config-hscsn" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.013102 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-df4vg-config-hscsn"] Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.017099 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b8de9a37-3519-4804-b3e8-197bea437afe-operator-scripts\") pod \"b8de9a37-3519-4804-b3e8-197bea437afe\" (UID: \"b8de9a37-3519-4804-b3e8-197bea437afe\") " Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.017412 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c5sv5\" (UniqueName: \"kubernetes.io/projected/b8de9a37-3519-4804-b3e8-197bea437afe-kube-api-access-c5sv5\") pod \"b8de9a37-3519-4804-b3e8-197bea437afe\" (UID: \"b8de9a37-3519-4804-b3e8-197bea437afe\") " Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.018009 4650 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c3f0e263-8271-4b59-8064-4cf05e94e611-additional-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.018121 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92592241-70ef-42cc-b3b7-3a85bcdba8a8-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.018185 4650 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-log-ovn\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.018297 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f87nj\" (UniqueName: \"kubernetes.io/projected/44937f93-aef1-4223-ad7e-5d05832d2f4b-kube-api-access-f87nj\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.018382 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svprc\" (UniqueName: \"kubernetes.io/projected/92592241-70ef-42cc-b3b7-3a85bcdba8a8-kube-api-access-svprc\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.018451 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/44937f93-aef1-4223-ad7e-5d05832d2f4b-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.018523 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2sfz\" (UniqueName: \"kubernetes.io/projected/c3f0e263-8271-4b59-8064-4cf05e94e611-kube-api-access-l2sfz\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.018592 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c3f0e263-8271-4b59-8064-4cf05e94e611-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.018658 4650 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-run\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.018723 4650 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" 
(UniqueName: \"kubernetes.io/host-path/c3f0e263-8271-4b59-8064-4cf05e94e611-var-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.024567 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8de9a37-3519-4804-b3e8-197bea437afe-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b8de9a37-3519-4804-b3e8-197bea437afe" (UID: "b8de9a37-3519-4804-b3e8-197bea437afe"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.028736 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a6d9-account-create-update-x2cpx" event={"ID":"44937f93-aef1-4223-ad7e-5d05832d2f4b","Type":"ContainerDied","Data":"34bfdc78b88b0cb9640d1dd5f0430b10a79e6df8d9d7492c42891a295dd13e30"} Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.028766 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="34bfdc78b88b0cb9640d1dd5f0430b10a79e6df8d9d7492c42891a295dd13e30" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.028833 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-a6d9-account-create-update-x2cpx" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.044306 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8de9a37-3519-4804-b3e8-197bea437afe-kube-api-access-c5sv5" (OuterVolumeSpecName: "kube-api-access-c5sv5") pod "b8de9a37-3519-4804-b3e8-197bea437afe" (UID: "b8de9a37-3519-4804-b3e8-197bea437afe"). InnerVolumeSpecName "kube-api-access-c5sv5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.120606 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60f5edb2-23c7-4720-a7f0-8a635e39cd03-operator-scripts\") pod \"60f5edb2-23c7-4720-a7f0-8a635e39cd03\" (UID: \"60f5edb2-23c7-4720-a7f0-8a635e39cd03\") " Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.120678 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8zth\" (UniqueName: \"kubernetes.io/projected/aee86276-99b2-44ef-ae5d-6072f34ffe58-kube-api-access-h8zth\") pod \"aee86276-99b2-44ef-ae5d-6072f34ffe58\" (UID: \"aee86276-99b2-44ef-ae5d-6072f34ffe58\") " Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.120732 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndjdw\" (UniqueName: \"kubernetes.io/projected/60f5edb2-23c7-4720-a7f0-8a635e39cd03-kube-api-access-ndjdw\") pod \"60f5edb2-23c7-4720-a7f0-8a635e39cd03\" (UID: \"60f5edb2-23c7-4720-a7f0-8a635e39cd03\") " Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.120824 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aee86276-99b2-44ef-ae5d-6072f34ffe58-operator-scripts\") pod \"aee86276-99b2-44ef-ae5d-6072f34ffe58\" (UID: \"aee86276-99b2-44ef-ae5d-6072f34ffe58\") " Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.121136 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60f5edb2-23c7-4720-a7f0-8a635e39cd03-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "60f5edb2-23c7-4720-a7f0-8a635e39cd03" (UID: 
"60f5edb2-23c7-4720-a7f0-8a635e39cd03"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.121211 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c5sv5\" (UniqueName: \"kubernetes.io/projected/b8de9a37-3519-4804-b3e8-197bea437afe-kube-api-access-c5sv5\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.121231 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b8de9a37-3519-4804-b3e8-197bea437afe-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.123612 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aee86276-99b2-44ef-ae5d-6072f34ffe58-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "aee86276-99b2-44ef-ae5d-6072f34ffe58" (UID: "aee86276-99b2-44ef-ae5d-6072f34ffe58"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.123605 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aee86276-99b2-44ef-ae5d-6072f34ffe58-kube-api-access-h8zth" (OuterVolumeSpecName: "kube-api-access-h8zth") pod "aee86276-99b2-44ef-ae5d-6072f34ffe58" (UID: "aee86276-99b2-44ef-ae5d-6072f34ffe58"). InnerVolumeSpecName "kube-api-access-h8zth". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.125176 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60f5edb2-23c7-4720-a7f0-8a635e39cd03-kube-api-access-ndjdw" (OuterVolumeSpecName: "kube-api-access-ndjdw") pod "60f5edb2-23c7-4720-a7f0-8a635e39cd03" (UID: "60f5edb2-23c7-4720-a7f0-8a635e39cd03"). InnerVolumeSpecName "kube-api-access-ndjdw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.222766 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60f5edb2-23c7-4720-a7f0-8a635e39cd03-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.222799 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8zth\" (UniqueName: \"kubernetes.io/projected/aee86276-99b2-44ef-ae5d-6072f34ffe58-kube-api-access-h8zth\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.222811 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndjdw\" (UniqueName: \"kubernetes.io/projected/60f5edb2-23c7-4720-a7f0-8a635e39cd03-kube-api-access-ndjdw\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.222820 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aee86276-99b2-44ef-ae5d-6072f34ffe58-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:03 crc kubenswrapper[4650]: I0201 07:41:03.979156 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3f0e263-8271-4b59-8064-4cf05e94e611" path="/var/lib/kubelet/pods/c3f0e263-8271-4b59-8064-4cf05e94e611/volumes" Feb 01 07:41:04 crc kubenswrapper[4650]: I0201 07:41:04.044757 4650 generic.go:334] "Generic (PLEG): container finished" podID="b01aeb4f-ec32-444e-b714-6ab54c79bad3" containerID="a4c3419430b19047f133d5cd68eeaeef7602934e5639e9f02510aaec85580f3f" exitCode=0 Feb 01 07:41:04 crc kubenswrapper[4650]: I0201 07:41:04.044801 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bkqlt" event={"ID":"b01aeb4f-ec32-444e-b714-6ab54c79bad3","Type":"ContainerDied","Data":"a4c3419430b19047f133d5cd68eeaeef7602934e5639e9f02510aaec85580f3f"} Feb 01 07:41:04 crc kubenswrapper[4650]: I0201 07:41:04.045055 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3173-account-create-update-l5lzr" Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.071117 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bkqlt" event={"ID":"b01aeb4f-ec32-444e-b714-6ab54c79bad3","Type":"ContainerDied","Data":"05886c72c66341b5d156df96f39552d2c9d7b05c9b0a6f034a028087d740e7e5"} Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.071426 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05886c72c66341b5d156df96f39552d2c9d7b05c9b0a6f034a028087d740e7e5" Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.285853 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-bkqlt" Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.376687 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-config-data\") pod \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.376786 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8nzxw\" (UniqueName: \"kubernetes.io/projected/b01aeb4f-ec32-444e-b714-6ab54c79bad3-kube-api-access-8nzxw\") pod \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.376874 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-db-sync-config-data\") pod \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.376919 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-combined-ca-bundle\") pod \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\" (UID: \"b01aeb4f-ec32-444e-b714-6ab54c79bad3\") " Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.383680 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b01aeb4f-ec32-444e-b714-6ab54c79bad3-kube-api-access-8nzxw" (OuterVolumeSpecName: "kube-api-access-8nzxw") pod "b01aeb4f-ec32-444e-b714-6ab54c79bad3" (UID: "b01aeb4f-ec32-444e-b714-6ab54c79bad3"). InnerVolumeSpecName "kube-api-access-8nzxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.393774 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b01aeb4f-ec32-444e-b714-6ab54c79bad3" (UID: "b01aeb4f-ec32-444e-b714-6ab54c79bad3"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.400414 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b01aeb4f-ec32-444e-b714-6ab54c79bad3" (UID: "b01aeb4f-ec32-444e-b714-6ab54c79bad3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.453749 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-config-data" (OuterVolumeSpecName: "config-data") pod "b01aeb4f-ec32-444e-b714-6ab54c79bad3" (UID: "b01aeb4f-ec32-444e-b714-6ab54c79bad3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.478867 4650 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.478892 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.478900 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b01aeb4f-ec32-444e-b714-6ab54c79bad3-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:06 crc kubenswrapper[4650]: I0201 07:41:06.478908 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8nzxw\" (UniqueName: \"kubernetes.io/projected/b01aeb4f-ec32-444e-b714-6ab54c79bad3-kube-api-access-8nzxw\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.090602 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-bkqlt" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.091146 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-sz8cr" event={"ID":"3127db22-5d48-4d3e-bd3e-806a06e6cad8","Type":"ContainerStarted","Data":"d70d4e097467770c1b69cc43e7089ed32e2a33e86124026db3fd2fbec8555db5"} Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.114657 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-sz8cr" podStartSLOduration=3.052447315 podStartE2EDuration="9.1146408s" podCreationTimestamp="2026-02-01 07:40:58 +0000 UTC" firstStartedPulling="2026-02-01 07:41:00.075408347 +0000 UTC m=+1058.798506592" lastFinishedPulling="2026-02-01 07:41:06.137601832 +0000 UTC m=+1064.860700077" observedRunningTime="2026-02-01 07:41:07.107701297 +0000 UTC m=+1065.830799612" watchObservedRunningTime="2026-02-01 07:41:07.1146408 +0000 UTC m=+1065.837739055" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.753505 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-ssvxw"] Feb 01 07:41:07 crc kubenswrapper[4650]: E0201 07:41:07.753995 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aee86276-99b2-44ef-ae5d-6072f34ffe58" containerName="mariadb-account-create-update" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754012 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="aee86276-99b2-44ef-ae5d-6072f34ffe58" containerName="mariadb-account-create-update" Feb 01 07:41:07 crc kubenswrapper[4650]: E0201 07:41:07.754070 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92592241-70ef-42cc-b3b7-3a85bcdba8a8" containerName="mariadb-database-create" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754077 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="92592241-70ef-42cc-b3b7-3a85bcdba8a8" containerName="mariadb-database-create" Feb 01 07:41:07 crc kubenswrapper[4650]: E0201 07:41:07.754095 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3f0e263-8271-4b59-8064-4cf05e94e611" containerName="ovn-config" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754101 4650 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="c3f0e263-8271-4b59-8064-4cf05e94e611" containerName="ovn-config" Feb 01 07:41:07 crc kubenswrapper[4650]: E0201 07:41:07.754112 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8368ba08-f07f-4082-bfd0-e72e1a38d7a8" containerName="mariadb-account-create-update" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754119 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="8368ba08-f07f-4082-bfd0-e72e1a38d7a8" containerName="mariadb-account-create-update" Feb 01 07:41:07 crc kubenswrapper[4650]: E0201 07:41:07.754131 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8de9a37-3519-4804-b3e8-197bea437afe" containerName="mariadb-database-create" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754136 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8de9a37-3519-4804-b3e8-197bea437afe" containerName="mariadb-database-create" Feb 01 07:41:07 crc kubenswrapper[4650]: E0201 07:41:07.754148 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b01aeb4f-ec32-444e-b714-6ab54c79bad3" containerName="glance-db-sync" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754154 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b01aeb4f-ec32-444e-b714-6ab54c79bad3" containerName="glance-db-sync" Feb 01 07:41:07 crc kubenswrapper[4650]: E0201 07:41:07.754164 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44937f93-aef1-4223-ad7e-5d05832d2f4b" containerName="mariadb-account-create-update" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754169 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="44937f93-aef1-4223-ad7e-5d05832d2f4b" containerName="mariadb-account-create-update" Feb 01 07:41:07 crc kubenswrapper[4650]: E0201 07:41:07.754178 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60f5edb2-23c7-4720-a7f0-8a635e39cd03" containerName="mariadb-database-create" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754184 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="60f5edb2-23c7-4720-a7f0-8a635e39cd03" containerName="mariadb-database-create" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754312 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="aee86276-99b2-44ef-ae5d-6072f34ffe58" containerName="mariadb-account-create-update" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754328 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="92592241-70ef-42cc-b3b7-3a85bcdba8a8" containerName="mariadb-database-create" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754335 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8de9a37-3519-4804-b3e8-197bea437afe" containerName="mariadb-database-create" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754342 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3f0e263-8271-4b59-8064-4cf05e94e611" containerName="ovn-config" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754350 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="44937f93-aef1-4223-ad7e-5d05832d2f4b" containerName="mariadb-account-create-update" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754359 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="60f5edb2-23c7-4720-a7f0-8a635e39cd03" containerName="mariadb-database-create" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754373 4650 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="b01aeb4f-ec32-444e-b714-6ab54c79bad3" containerName="glance-db-sync" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.754382 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="8368ba08-f07f-4082-bfd0-e72e1a38d7a8" containerName="mariadb-account-create-update" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.755190 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.779918 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-ssvxw"] Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.905330 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-ovsdbserver-sb\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.905377 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-dns-svc\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.905443 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dk7z5\" (UniqueName: \"kubernetes.io/projected/df2a9e33-1389-4213-a5b7-7d749e523079-kube-api-access-dk7z5\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.905470 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-config\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.905487 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-ovsdbserver-nb\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.966125 4650 scope.go:117] "RemoveContainer" containerID="d2f715a393c524210c739be45a6809ad759184af4ae9bdac9460496004b7b00d" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.966192 4650 scope.go:117] "RemoveContainer" containerID="0fb0fa378a10d2f58c054ac5c1759a8c5f3b3807d985594164a305e6defd887a" Feb 01 07:41:07 crc kubenswrapper[4650]: I0201 07:41:07.966281 4650 scope.go:117] "RemoveContainer" containerID="b4fe5ae45d159834a9c667093e751e227a564d39ec95f64cc51d8b99cc229ac8" Feb 01 07:41:07 crc kubenswrapper[4650]: E0201 07:41:07.966553 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for 
\"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:41:08 crc kubenswrapper[4650]: I0201 07:41:08.007447 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-dns-svc\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:08 crc kubenswrapper[4650]: I0201 07:41:08.007558 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dk7z5\" (UniqueName: \"kubernetes.io/projected/df2a9e33-1389-4213-a5b7-7d749e523079-kube-api-access-dk7z5\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:08 crc kubenswrapper[4650]: I0201 07:41:08.007600 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-config\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:08 crc kubenswrapper[4650]: I0201 07:41:08.007618 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-ovsdbserver-nb\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:08 crc kubenswrapper[4650]: I0201 07:41:08.007707 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-ovsdbserver-sb\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:08 crc kubenswrapper[4650]: I0201 07:41:08.009052 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-config\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:08 crc kubenswrapper[4650]: I0201 07:41:08.009052 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-dns-svc\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:08 crc kubenswrapper[4650]: I0201 07:41:08.009168 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-ovsdbserver-sb\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:08 crc kubenswrapper[4650]: I0201 07:41:08.009176 4650 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-ovsdbserver-nb\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:08 crc kubenswrapper[4650]: I0201 07:41:08.030487 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dk7z5\" (UniqueName: \"kubernetes.io/projected/df2a9e33-1389-4213-a5b7-7d749e523079-kube-api-access-dk7z5\") pod \"dnsmasq-dns-74dc88fc-ssvxw\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:08 crc kubenswrapper[4650]: I0201 07:41:08.078035 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:08 crc kubenswrapper[4650]: I0201 07:41:08.598755 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-ssvxw"] Feb 01 07:41:09 crc kubenswrapper[4650]: I0201 07:41:09.108095 4650 generic.go:334] "Generic (PLEG): container finished" podID="df2a9e33-1389-4213-a5b7-7d749e523079" containerID="26178dd82ae72fec749913f8555883cb8722a26edd98fa6638b66c5dfba79c32" exitCode=0 Feb 01 07:41:09 crc kubenswrapper[4650]: I0201 07:41:09.108182 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" event={"ID":"df2a9e33-1389-4213-a5b7-7d749e523079","Type":"ContainerDied","Data":"26178dd82ae72fec749913f8555883cb8722a26edd98fa6638b66c5dfba79c32"} Feb 01 07:41:09 crc kubenswrapper[4650]: I0201 07:41:09.108777 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" event={"ID":"df2a9e33-1389-4213-a5b7-7d749e523079","Type":"ContainerStarted","Data":"c2f4283d0ed151684763ba926445a7bde4b2a5e26eaceb0036163cb69e2a7247"} Feb 01 07:41:10 crc kubenswrapper[4650]: I0201 07:41:10.121485 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" event={"ID":"df2a9e33-1389-4213-a5b7-7d749e523079","Type":"ContainerStarted","Data":"a8b4e64247a145e30bae1805a3035251d76d0ace76ccf66ffce24cc812cfccc2"} Feb 01 07:41:10 crc kubenswrapper[4650]: I0201 07:41:10.122901 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:10 crc kubenswrapper[4650]: I0201 07:41:10.153131 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" podStartSLOduration=3.153109692 podStartE2EDuration="3.153109692s" podCreationTimestamp="2026-02-01 07:41:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:10.150500363 +0000 UTC m=+1068.873598628" watchObservedRunningTime="2026-02-01 07:41:10.153109692 +0000 UTC m=+1068.876207957" Feb 01 07:41:11 crc kubenswrapper[4650]: I0201 07:41:11.128949 4650 generic.go:334] "Generic (PLEG): container finished" podID="3127db22-5d48-4d3e-bd3e-806a06e6cad8" containerID="d70d4e097467770c1b69cc43e7089ed32e2a33e86124026db3fd2fbec8555db5" exitCode=0 Feb 01 07:41:11 crc kubenswrapper[4650]: I0201 07:41:11.129082 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-sz8cr" event={"ID":"3127db22-5d48-4d3e-bd3e-806a06e6cad8","Type":"ContainerDied","Data":"d70d4e097467770c1b69cc43e7089ed32e2a33e86124026db3fd2fbec8555db5"} Feb 01 
07:41:12 crc kubenswrapper[4650]: I0201 07:41:12.598085 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-sz8cr" Feb 01 07:41:12 crc kubenswrapper[4650]: I0201 07:41:12.620630 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3127db22-5d48-4d3e-bd3e-806a06e6cad8-combined-ca-bundle\") pod \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\" (UID: \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\") " Feb 01 07:41:12 crc kubenswrapper[4650]: I0201 07:41:12.620731 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7lcz\" (UniqueName: \"kubernetes.io/projected/3127db22-5d48-4d3e-bd3e-806a06e6cad8-kube-api-access-m7lcz\") pod \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\" (UID: \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\") " Feb 01 07:41:12 crc kubenswrapper[4650]: I0201 07:41:12.620864 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3127db22-5d48-4d3e-bd3e-806a06e6cad8-config-data\") pod \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\" (UID: \"3127db22-5d48-4d3e-bd3e-806a06e6cad8\") " Feb 01 07:41:12 crc kubenswrapper[4650]: I0201 07:41:12.662674 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3127db22-5d48-4d3e-bd3e-806a06e6cad8-kube-api-access-m7lcz" (OuterVolumeSpecName: "kube-api-access-m7lcz") pod "3127db22-5d48-4d3e-bd3e-806a06e6cad8" (UID: "3127db22-5d48-4d3e-bd3e-806a06e6cad8"). InnerVolumeSpecName "kube-api-access-m7lcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:12 crc kubenswrapper[4650]: I0201 07:41:12.668290 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3127db22-5d48-4d3e-bd3e-806a06e6cad8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3127db22-5d48-4d3e-bd3e-806a06e6cad8" (UID: "3127db22-5d48-4d3e-bd3e-806a06e6cad8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:12 crc kubenswrapper[4650]: I0201 07:41:12.703793 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3127db22-5d48-4d3e-bd3e-806a06e6cad8-config-data" (OuterVolumeSpecName: "config-data") pod "3127db22-5d48-4d3e-bd3e-806a06e6cad8" (UID: "3127db22-5d48-4d3e-bd3e-806a06e6cad8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:12 crc kubenswrapper[4650]: I0201 07:41:12.722232 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3127db22-5d48-4d3e-bd3e-806a06e6cad8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:12 crc kubenswrapper[4650]: I0201 07:41:12.722296 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7lcz\" (UniqueName: \"kubernetes.io/projected/3127db22-5d48-4d3e-bd3e-806a06e6cad8-kube-api-access-m7lcz\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:12 crc kubenswrapper[4650]: I0201 07:41:12.722313 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3127db22-5d48-4d3e-bd3e-806a06e6cad8-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.152785 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-sz8cr" event={"ID":"3127db22-5d48-4d3e-bd3e-806a06e6cad8","Type":"ContainerDied","Data":"39222268f3e481813c0a94185bc97fa0a31d820a98a537bbd8a1d7457bca71c7"} Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.153127 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39222268f3e481813c0a94185bc97fa0a31d820a98a537bbd8a1d7457bca71c7" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.153348 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-sz8cr" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.417917 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-ssvxw"] Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.418258 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" podUID="df2a9e33-1389-4213-a5b7-7d749e523079" containerName="dnsmasq-dns" containerID="cri-o://a8b4e64247a145e30bae1805a3035251d76d0ace76ccf66ffce24cc812cfccc2" gracePeriod=10 Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.448097 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-qstsn"] Feb 01 07:41:13 crc kubenswrapper[4650]: E0201 07:41:13.448593 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3127db22-5d48-4d3e-bd3e-806a06e6cad8" containerName="keystone-db-sync" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.448616 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="3127db22-5d48-4d3e-bd3e-806a06e6cad8" containerName="keystone-db-sync" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.448809 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="3127db22-5d48-4d3e-bd3e-806a06e6cad8" containerName="keystone-db-sync" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.449589 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.457742 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.457766 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8zg69" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.457835 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.457980 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.458601 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.460163 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qstsn"] Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.492617 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7d5679f497-xbn7z"] Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.494591 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.543056 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d5679f497-xbn7z"] Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.645149 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-dns-svc\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.645223 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-credential-keys\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.645478 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-ovsdbserver-sb\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.645515 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9hqp\" (UniqueName: \"kubernetes.io/projected/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-kube-api-access-n9hqp\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.645562 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-config-data\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " 
pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.645586 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-config\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.645618 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-ovsdbserver-nb\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.645648 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-scripts\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.645670 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-fernet-keys\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.645695 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-combined-ca-bundle\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.645734 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cj52\" (UniqueName: \"kubernetes.io/projected/1530bd4b-d35a-42ac-b85f-88d790abf462-kube-api-access-8cj52\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.746875 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-config-data\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.746932 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-config\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.746958 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-ovsdbserver-nb\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " 
pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.746984 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-scripts\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.747004 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-fernet-keys\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.747752 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-combined-ca-bundle\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.747796 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cj52\" (UniqueName: \"kubernetes.io/projected/1530bd4b-d35a-42ac-b85f-88d790abf462-kube-api-access-8cj52\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.747826 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-dns-svc\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.747845 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-credential-keys\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.747881 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-ovsdbserver-sb\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.747906 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9hqp\" (UniqueName: \"kubernetes.io/projected/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-kube-api-access-n9hqp\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.748050 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-config\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.748626 4650 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-dns-svc\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.748060 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-ovsdbserver-nb\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.749243 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-ovsdbserver-sb\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.758194 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-scripts\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.758568 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-config-data\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.758865 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-credential-keys\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.800739 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-combined-ca-bundle\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.807577 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-fernet-keys\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.813953 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-x99cv"] Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.814914 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.824700 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.825221 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.825475 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-rtbt8" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.825974 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9hqp\" (UniqueName: \"kubernetes.io/projected/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-kube-api-access-n9hqp\") pod \"dnsmasq-dns-7d5679f497-xbn7z\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.831417 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.838125 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-x99cv"] Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.841917 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-695d6f76c-qccxs"] Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.843571 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.851118 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.851289 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.851400 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.851508 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-xlkx4" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.853932 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cj52\" (UniqueName: \"kubernetes.io/projected/1530bd4b-d35a-42ac-b85f-88d790abf462-kube-api-access-8cj52\") pod \"keystone-bootstrap-qstsn\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.910167 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-695d6f76c-qccxs"] Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.958583 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-config-data\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.958638 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3f97afc-40d4-4fc4-be00-1280202c0a31-logs\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " 
pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.958657 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/00154668-79cc-4c4d-81f9-e7975168f700-etc-machine-id\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.958683 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f3f97afc-40d4-4fc4-be00-1280202c0a31-scripts\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.958705 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tnd7\" (UniqueName: \"kubernetes.io/projected/f3f97afc-40d4-4fc4-be00-1280202c0a31-kube-api-access-6tnd7\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.958724 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f3f97afc-40d4-4fc4-be00-1280202c0a31-config-data\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.958748 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdsvl\" (UniqueName: \"kubernetes.io/projected/00154668-79cc-4c4d-81f9-e7975168f700-kube-api-access-jdsvl\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.958771 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f3f97afc-40d4-4fc4-be00-1280202c0a31-horizon-secret-key\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.958786 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-scripts\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.958815 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-combined-ca-bundle\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:13 crc kubenswrapper[4650]: I0201 07:41:13.958835 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-db-sync-config-data\") pod \"cinder-db-sync-x99cv\" (UID: 
\"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.060427 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdsvl\" (UniqueName: \"kubernetes.io/projected/00154668-79cc-4c4d-81f9-e7975168f700-kube-api-access-jdsvl\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.060475 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f3f97afc-40d4-4fc4-be00-1280202c0a31-horizon-secret-key\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.060496 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-scripts\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.060533 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-combined-ca-bundle\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.060554 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-db-sync-config-data\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.060618 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-config-data\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.060638 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3f97afc-40d4-4fc4-be00-1280202c0a31-logs\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.060657 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/00154668-79cc-4c4d-81f9-e7975168f700-etc-machine-id\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.060683 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f3f97afc-40d4-4fc4-be00-1280202c0a31-scripts\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.060706 4650 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-6tnd7\" (UniqueName: \"kubernetes.io/projected/f3f97afc-40d4-4fc4-be00-1280202c0a31-kube-api-access-6tnd7\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.060726 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f3f97afc-40d4-4fc4-be00-1280202c0a31-config-data\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.062068 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f3f97afc-40d4-4fc4-be00-1280202c0a31-config-data\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.072149 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/00154668-79cc-4c4d-81f9-e7975168f700-etc-machine-id\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.073000 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f3f97afc-40d4-4fc4-be00-1280202c0a31-horizon-secret-key\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.073293 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3f97afc-40d4-4fc4-be00-1280202c0a31-logs\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.074649 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f3f97afc-40d4-4fc4-be00-1280202c0a31-scripts\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.090694 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-scripts\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.094043 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-combined-ca-bundle\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.094780 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-config-data\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc 
kubenswrapper[4650]: I0201 07:41:14.095822 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-db-sync-config-data\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.101662 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.117573 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.139354 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-kzjnq"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.140532 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-kzjnq" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.148157 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.148932 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-bkmh9" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.164345 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdsvl\" (UniqueName: \"kubernetes.io/projected/00154668-79cc-4c4d-81f9-e7975168f700-kube-api-access-jdsvl\") pod \"cinder-db-sync-x99cv\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.164628 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.165273 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.165534 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.172062 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-d4zk4"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.173407 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.180910 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-dd86b" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.185497 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tnd7\" (UniqueName: \"kubernetes.io/projected/f3f97afc-40d4-4fc4-be00-1280202c0a31-kube-api-access-6tnd7\") pod \"horizon-695d6f76c-qccxs\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.182378 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.182421 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.205391 4650 generic.go:334] "Generic (PLEG): container finished" podID="df2a9e33-1389-4213-a5b7-7d749e523079" containerID="a8b4e64247a145e30bae1805a3035251d76d0ace76ccf66ffce24cc812cfccc2" exitCode=0 Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.205433 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" event={"ID":"df2a9e33-1389-4213-a5b7-7d749e523079","Type":"ContainerDied","Data":"a8b4e64247a145e30bae1805a3035251d76d0ace76ccf66ffce24cc812cfccc2"} Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.212786 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-x99cv" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.222394 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.222917 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.232808 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-kzjnq"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.266086 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-5gv78"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.267223 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-5gv78" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.268162 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76h48\" (UniqueName: \"kubernetes.io/projected/9d6a29ee-be36-4454-bf92-6dfffd45687b-kube-api-access-76h48\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.268313 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vr6s\" (UniqueName: \"kubernetes.io/projected/2208b1dc-dbac-498a-a760-21257b722e80-kube-api-access-6vr6s\") pod \"barbican-db-sync-kzjnq\" (UID: \"2208b1dc-dbac-498a-a760-21257b722e80\") " pod="openstack/barbican-db-sync-kzjnq" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.268411 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.268487 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.268572 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dvxp\" (UniqueName: \"kubernetes.io/projected/b243d67e-b432-4b66-aa65-05cdbc100cb7-kube-api-access-7dvxp\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.268637 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d6a29ee-be36-4454-bf92-6dfffd45687b-logs\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.268703 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-combined-ca-bundle\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.268765 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-scripts\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.268855 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b243d67e-b432-4b66-aa65-05cdbc100cb7-run-httpd\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 
07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.268961 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2208b1dc-dbac-498a-a760-21257b722e80-db-sync-config-data\") pod \"barbican-db-sync-kzjnq\" (UID: \"2208b1dc-dbac-498a-a760-21257b722e80\") " pod="openstack/barbican-db-sync-kzjnq" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.269048 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b243d67e-b432-4b66-aa65-05cdbc100cb7-log-httpd\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.269121 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2208b1dc-dbac-498a-a760-21257b722e80-combined-ca-bundle\") pod \"barbican-db-sync-kzjnq\" (UID: \"2208b1dc-dbac-498a-a760-21257b722e80\") " pod="openstack/barbican-db-sync-kzjnq" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.269190 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-config-data\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.269248 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-config-data\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.269321 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-scripts\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.306898 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.307548 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.343206 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-d4zk4"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.349822 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-qnnk9" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.389774 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d5679f497-xbn7z"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.390091 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.401245 4650 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.413724 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16da8114-b11c-449a-8cf7-17c1980cdcf7-combined-ca-bundle\") pod \"neutron-db-sync-5gv78\" (UID: \"16da8114-b11c-449a-8cf7-17c1980cdcf7\") " pod="openstack/neutron-db-sync-5gv78" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.413865 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dvxp\" (UniqueName: \"kubernetes.io/projected/b243d67e-b432-4b66-aa65-05cdbc100cb7-kube-api-access-7dvxp\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.413967 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55lp9\" (UniqueName: \"kubernetes.io/projected/16da8114-b11c-449a-8cf7-17c1980cdcf7-kube-api-access-55lp9\") pod \"neutron-db-sync-5gv78\" (UID: \"16da8114-b11c-449a-8cf7-17c1980cdcf7\") " pod="openstack/neutron-db-sync-5gv78" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.414085 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d6a29ee-be36-4454-bf92-6dfffd45687b-logs\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.414194 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-combined-ca-bundle\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.414293 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-scripts\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.414456 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b243d67e-b432-4b66-aa65-05cdbc100cb7-run-httpd\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.414557 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2208b1dc-dbac-498a-a760-21257b722e80-db-sync-config-data\") pod \"barbican-db-sync-kzjnq\" (UID: \"2208b1dc-dbac-498a-a760-21257b722e80\") " pod="openstack/barbican-db-sync-kzjnq" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.414677 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b243d67e-b432-4b66-aa65-05cdbc100cb7-log-httpd\") pod \"ceilometer-0\" 
(UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.414774 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/16da8114-b11c-449a-8cf7-17c1980cdcf7-config\") pod \"neutron-db-sync-5gv78\" (UID: \"16da8114-b11c-449a-8cf7-17c1980cdcf7\") " pod="openstack/neutron-db-sync-5gv78" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.414885 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2208b1dc-dbac-498a-a760-21257b722e80-combined-ca-bundle\") pod \"barbican-db-sync-kzjnq\" (UID: \"2208b1dc-dbac-498a-a760-21257b722e80\") " pod="openstack/barbican-db-sync-kzjnq" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.414984 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-config-data\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.415088 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-config-data\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.415197 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-scripts\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.415310 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76h48\" (UniqueName: \"kubernetes.io/projected/9d6a29ee-be36-4454-bf92-6dfffd45687b-kube-api-access-76h48\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.415422 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vr6s\" (UniqueName: \"kubernetes.io/projected/2208b1dc-dbac-498a-a760-21257b722e80-kube-api-access-6vr6s\") pod \"barbican-db-sync-kzjnq\" (UID: \"2208b1dc-dbac-498a-a760-21257b722e80\") " pod="openstack/barbican-db-sync-kzjnq" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.416733 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d6a29ee-be36-4454-bf92-6dfffd45687b-logs\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.421903 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.422434 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.431221 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2208b1dc-dbac-498a-a760-21257b722e80-combined-ca-bundle\") pod \"barbican-db-sync-kzjnq\" (UID: \"2208b1dc-dbac-498a-a760-21257b722e80\") " pod="openstack/barbican-db-sync-kzjnq" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.431678 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b243d67e-b432-4b66-aa65-05cdbc100cb7-log-httpd\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.435081 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-scripts\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.438733 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-scripts\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.446747 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b243d67e-b432-4b66-aa65-05cdbc100cb7-run-httpd\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.453552 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-config-data\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.457567 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vr6s\" (UniqueName: \"kubernetes.io/projected/2208b1dc-dbac-498a-a760-21257b722e80-kube-api-access-6vr6s\") pod \"barbican-db-sync-kzjnq\" (UID: \"2208b1dc-dbac-498a-a760-21257b722e80\") " pod="openstack/barbican-db-sync-kzjnq" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.461902 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-config-data\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.495875 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dvxp\" (UniqueName: \"kubernetes.io/projected/b243d67e-b432-4b66-aa65-05cdbc100cb7-kube-api-access-7dvxp\") pod \"ceilometer-0\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.498101 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-combined-ca-bundle\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.500501 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2208b1dc-dbac-498a-a760-21257b722e80-db-sync-config-data\") pod \"barbican-db-sync-kzjnq\" (UID: \"2208b1dc-dbac-498a-a760-21257b722e80\") " pod="openstack/barbican-db-sync-kzjnq" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.519258 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16da8114-b11c-449a-8cf7-17c1980cdcf7-combined-ca-bundle\") pod \"neutron-db-sync-5gv78\" (UID: \"16da8114-b11c-449a-8cf7-17c1980cdcf7\") " pod="openstack/neutron-db-sync-5gv78" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.519306 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55lp9\" (UniqueName: \"kubernetes.io/projected/16da8114-b11c-449a-8cf7-17c1980cdcf7-kube-api-access-55lp9\") pod \"neutron-db-sync-5gv78\" (UID: \"16da8114-b11c-449a-8cf7-17c1980cdcf7\") " pod="openstack/neutron-db-sync-5gv78" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.519385 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/16da8114-b11c-449a-8cf7-17c1980cdcf7-config\") pod \"neutron-db-sync-5gv78\" (UID: \"16da8114-b11c-449a-8cf7-17c1980cdcf7\") " pod="openstack/neutron-db-sync-5gv78" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.519973 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.521271 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76h48\" (UniqueName: \"kubernetes.io/projected/9d6a29ee-be36-4454-bf92-6dfffd45687b-kube-api-access-76h48\") pod \"placement-db-sync-d4zk4\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.533894 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/16da8114-b11c-449a-8cf7-17c1980cdcf7-config\") pod \"neutron-db-sync-5gv78\" (UID: \"16da8114-b11c-449a-8cf7-17c1980cdcf7\") " pod="openstack/neutron-db-sync-5gv78" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.533957 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-5gv78"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.553860 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16da8114-b11c-449a-8cf7-17c1980cdcf7-combined-ca-bundle\") pod \"neutron-db-sync-5gv78\" (UID: \"16da8114-b11c-449a-8cf7-17c1980cdcf7\") " pod="openstack/neutron-db-sync-5gv78" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.618100 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56798b757f-84j7d"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.619696 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.631241 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55lp9\" (UniqueName: \"kubernetes.io/projected/16da8114-b11c-449a-8cf7-17c1980cdcf7-kube-api-access-55lp9\") pod \"neutron-db-sync-5gv78\" (UID: \"16da8114-b11c-449a-8cf7-17c1980cdcf7\") " pod="openstack/neutron-db-sync-5gv78" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.651200 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-5gv78" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.655154 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-84j7d"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.715688 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-66d9bfcd5-6jkbm"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.717837 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.728138 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-config\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.728217 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-ovsdbserver-sb\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.728248 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-dns-svc\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.728339 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmtqp\" (UniqueName: \"kubernetes.io/projected/dfb53357-604b-407e-8577-36288efeda68-kube-api-access-jmtqp\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.728365 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-ovsdbserver-nb\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.734344 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-66d9bfcd5-6jkbm"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.779458 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-kzjnq" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.833247 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmtqp\" (UniqueName: \"kubernetes.io/projected/dfb53357-604b-407e-8577-36288efeda68-kube-api-access-jmtqp\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.834078 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-d4zk4" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.834419 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-ovsdbserver-nb\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.834467 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-config\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.834520 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-ovsdbserver-sb\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.834548 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-dns-svc\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.835802 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-ovsdbserver-nb\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.836811 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-config\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.845697 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-dns-svc\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.845973 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-ovsdbserver-sb\") pod 
\"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.889827 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmtqp\" (UniqueName: \"kubernetes.io/projected/dfb53357-604b-407e-8577-36288efeda68-kube-api-access-jmtqp\") pod \"dnsmasq-dns-56798b757f-84j7d\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.889890 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d5679f497-xbn7z"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.927319 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.928693 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.955183 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46e1952e-615d-42f3-891c-d5a6b7cbd50a-logs\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.955483 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxg9h\" (UniqueName: \"kubernetes.io/projected/46e1952e-615d-42f3-891c-d5a6b7cbd50a-kube-api-access-zxg9h\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.955764 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/46e1952e-615d-42f3-891c-d5a6b7cbd50a-horizon-secret-key\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.955863 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/46e1952e-615d-42f3-891c-d5a6b7cbd50a-scripts\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.956059 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46e1952e-615d-42f3-891c-d5a6b7cbd50a-config-data\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.955624 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.955681 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.955710 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Feb 01 07:41:14 crc 
kubenswrapper[4650]: I0201 07:41:14.957101 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-bc68g" Feb 01 07:41:14 crc kubenswrapper[4650]: I0201 07:41:14.996974 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.066999 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46e1952e-615d-42f3-891c-d5a6b7cbd50a-config-data\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.067069 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-config-data\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.067097 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.067137 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46e1952e-615d-42f3-891c-d5a6b7cbd50a-logs\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.067160 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.067188 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxg9h\" (UniqueName: \"kubernetes.io/projected/46e1952e-615d-42f3-891c-d5a6b7cbd50a-kube-api-access-zxg9h\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.067206 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/46e1952e-615d-42f3-891c-d5a6b7cbd50a-horizon-secret-key\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.067232 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/46e1952e-615d-42f3-891c-d5a6b7cbd50a-scripts\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.067263 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58lfv\" (UniqueName: \"kubernetes.io/projected/1fea4231-c72c-43db-b1d8-ee3a755df1b4-kube-api-access-58lfv\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.067305 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1fea4231-c72c-43db-b1d8-ee3a755df1b4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.067327 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-scripts\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.067351 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.067388 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1fea4231-c72c-43db-b1d8-ee3a755df1b4-logs\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.077156 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46e1952e-615d-42f3-891c-d5a6b7cbd50a-config-data\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.077690 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46e1952e-615d-42f3-891c-d5a6b7cbd50a-logs\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.079182 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/46e1952e-615d-42f3-891c-d5a6b7cbd50a-scripts\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.085523 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/46e1952e-615d-42f3-891c-d5a6b7cbd50a-horizon-secret-key\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.089449 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:15 crc kubenswrapper[4650]: 
I0201 07:41:15.134313 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxg9h\" (UniqueName: \"kubernetes.io/projected/46e1952e-615d-42f3-891c-d5a6b7cbd50a-kube-api-access-zxg9h\") pod \"horizon-66d9bfcd5-6jkbm\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.137072 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.138425 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.147577 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.148517 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.149371 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.171318 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-config-data\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.171360 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.171394 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.171436 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58lfv\" (UniqueName: \"kubernetes.io/projected/1fea4231-c72c-43db-b1d8-ee3a755df1b4-kube-api-access-58lfv\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.171479 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1fea4231-c72c-43db-b1d8-ee3a755df1b4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.171508 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-scripts\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " 
pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.171533 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.171562 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1fea4231-c72c-43db-b1d8-ee3a755df1b4-logs\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.172008 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1fea4231-c72c-43db-b1d8-ee3a755df1b4-logs\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.177752 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1fea4231-c72c-43db-b1d8-ee3a755df1b4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.179054 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.194094 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-config-data\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.194767 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.198093 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-scripts\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.233420 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.236404 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.236758 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:15 crc kubenswrapper[4650]: E0201 07:41:15.245584 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data glance httpd-run internal-tls-certs kube-api-access-q5c9d logs scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/glance-default-internal-api-0" podUID="9b04f283-988b-464e-9163-4c6f1f030893" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.252160 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58lfv\" (UniqueName: \"kubernetes.io/projected/1fea4231-c72c-43db-b1d8-ee3a755df1b4-kube-api-access-58lfv\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.263779 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" event={"ID":"69068da6-b9ac-48bc-a3ce-3d9a04b47efb","Type":"ContainerStarted","Data":"10142298ebdc6eee34095e419ac5cdb205b5d5c9908a75232bf2e7b53c8789ea"} Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.271771 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.272501 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-config\") pod \"df2a9e33-1389-4213-a5b7-7d749e523079\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.272616 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-ovsdbserver-sb\") pod \"df2a9e33-1389-4213-a5b7-7d749e523079\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " Feb 01 
07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.272640 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dk7z5\" (UniqueName: \"kubernetes.io/projected/df2a9e33-1389-4213-a5b7-7d749e523079-kube-api-access-dk7z5\") pod \"df2a9e33-1389-4213-a5b7-7d749e523079\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.272776 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-dns-svc\") pod \"df2a9e33-1389-4213-a5b7-7d749e523079\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.272803 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-ovsdbserver-nb\") pod \"df2a9e33-1389-4213-a5b7-7d749e523079\" (UID: \"df2a9e33-1389-4213-a5b7-7d749e523079\") " Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.272969 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5c9d\" (UniqueName: \"kubernetes.io/projected/9b04f283-988b-464e-9163-4c6f1f030893-kube-api-access-q5c9d\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.273039 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.273054 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.273104 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b04f283-988b-464e-9163-4c6f1f030893-logs\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.273119 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9b04f283-988b-464e-9163-4c6f1f030893-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.273151 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.273202 4650 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.273238 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.311077 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" event={"ID":"df2a9e33-1389-4213-a5b7-7d749e523079","Type":"ContainerDied","Data":"c2f4283d0ed151684763ba926445a7bde4b2a5e26eaceb0036163cb69e2a7247"} Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.311419 4650 scope.go:117] "RemoveContainer" containerID="a8b4e64247a145e30bae1805a3035251d76d0ace76ccf66ffce24cc812cfccc2" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.313038 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74dc88fc-ssvxw" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.355187 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.369013 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df2a9e33-1389-4213-a5b7-7d749e523079-kube-api-access-dk7z5" (OuterVolumeSpecName: "kube-api-access-dk7z5") pod "df2a9e33-1389-4213-a5b7-7d749e523079" (UID: "df2a9e33-1389-4213-a5b7-7d749e523079"). InnerVolumeSpecName "kube-api-access-dk7z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.375890 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.376038 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.376127 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.376198 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5c9d\" (UniqueName: \"kubernetes.io/projected/9b04f283-988b-464e-9163-4c6f1f030893-kube-api-access-q5c9d\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.376292 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.376356 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.376473 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b04f283-988b-464e-9163-4c6f1f030893-logs\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.376534 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9b04f283-988b-464e-9163-4c6f1f030893-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.376636 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dk7z5\" (UniqueName: \"kubernetes.io/projected/df2a9e33-1389-4213-a5b7-7d749e523079-kube-api-access-dk7z5\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.377046 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9b04f283-988b-464e-9163-4c6f1f030893-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.377197 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.404607 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "df2a9e33-1389-4213-a5b7-7d749e523079" (UID: "df2a9e33-1389-4213-a5b7-7d749e523079"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.408908 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.409279 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b04f283-988b-464e-9163-4c6f1f030893-logs\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.409747 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "df2a9e33-1389-4213-a5b7-7d749e523079" (UID: "df2a9e33-1389-4213-a5b7-7d749e523079"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.476908 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.480723 4650 scope.go:117] "RemoveContainer" containerID="26178dd82ae72fec749913f8555883cb8722a26edd98fa6638b66c5dfba79c32" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.482070 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.482101 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.491249 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.530557 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "df2a9e33-1389-4213-a5b7-7d749e523079" (UID: "df2a9e33-1389-4213-a5b7-7d749e523079"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.560128 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.563188 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5c9d\" (UniqueName: \"kubernetes.io/projected/9b04f283-988b-464e-9163-4c6f1f030893-kube-api-access-q5c9d\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.565267 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.570729 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-config" (OuterVolumeSpecName: "config") pod "df2a9e33-1389-4213-a5b7-7d749e523079" (UID: "df2a9e33-1389-4213-a5b7-7d749e523079"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.575427 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.584206 4650 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.584254 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2a9e33-1389-4213-a5b7-7d749e523079-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.786446 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-ssvxw"] Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.794466 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-ssvxw"] Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.884777 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-x99cv"] Feb 01 07:41:15 crc kubenswrapper[4650]: I0201 07:41:15.977095 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df2a9e33-1389-4213-a5b7-7d749e523079" path="/var/lib/kubelet/pods/df2a9e33-1389-4213-a5b7-7d749e523079/volumes" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.178639 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-5gv78"] Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.328194 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-x99cv" event={"ID":"00154668-79cc-4c4d-81f9-e7975168f700","Type":"ContainerStarted","Data":"a26dd22ebba254fe4783437cb51de623db7cbc3fdf767d809ecbdf98686b361c"} Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.331106 4650 generic.go:334] "Generic (PLEG): container finished" podID="69068da6-b9ac-48bc-a3ce-3d9a04b47efb" containerID="6ec2293424e15e987a0c392071a5867eff43ef650fb2481d3c45b14acafadc9f" exitCode=0 Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.331167 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" event={"ID":"69068da6-b9ac-48bc-a3ce-3d9a04b47efb","Type":"ContainerDied","Data":"6ec2293424e15e987a0c392071a5867eff43ef650fb2481d3c45b14acafadc9f"} Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.341518 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-5gv78" event={"ID":"16da8114-b11c-449a-8cf7-17c1980cdcf7","Type":"ContainerStarted","Data":"044d19a61f78006756054e086ba5a0ccb8e22a6cf385124abc58f3c4a587ba30"} Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.343778 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.367151 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.511106 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b04f283-988b-464e-9163-4c6f1f030893-logs\") pod \"9b04f283-988b-464e-9163-4c6f1f030893\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.511199 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-config-data\") pod \"9b04f283-988b-464e-9163-4c6f1f030893\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.511224 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-internal-tls-certs\") pod \"9b04f283-988b-464e-9163-4c6f1f030893\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.511251 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"9b04f283-988b-464e-9163-4c6f1f030893\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.511291 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-combined-ca-bundle\") pod \"9b04f283-988b-464e-9163-4c6f1f030893\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.511312 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5c9d\" (UniqueName: \"kubernetes.io/projected/9b04f283-988b-464e-9163-4c6f1f030893-kube-api-access-q5c9d\") pod \"9b04f283-988b-464e-9163-4c6f1f030893\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.511399 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9b04f283-988b-464e-9163-4c6f1f030893-httpd-run\") pod \"9b04f283-988b-464e-9163-4c6f1f030893\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.511439 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-scripts\") pod \"9b04f283-988b-464e-9163-4c6f1f030893\" (UID: \"9b04f283-988b-464e-9163-4c6f1f030893\") " Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.512356 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b04f283-988b-464e-9163-4c6f1f030893-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9b04f283-988b-464e-9163-4c6f1f030893" (UID: "9b04f283-988b-464e-9163-4c6f1f030893"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.512392 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b04f283-988b-464e-9163-4c6f1f030893-logs" (OuterVolumeSpecName: "logs") pod "9b04f283-988b-464e-9163-4c6f1f030893" (UID: "9b04f283-988b-464e-9163-4c6f1f030893"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.512877 4650 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9b04f283-988b-464e-9163-4c6f1f030893-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.512890 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b04f283-988b-464e-9163-4c6f1f030893-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.520193 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-config-data" (OuterVolumeSpecName: "config-data") pod "9b04f283-988b-464e-9163-4c6f1f030893" (UID: "9b04f283-988b-464e-9163-4c6f1f030893"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.521507 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-scripts" (OuterVolumeSpecName: "scripts") pod "9b04f283-988b-464e-9163-4c6f1f030893" (UID: "9b04f283-988b-464e-9163-4c6f1f030893"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.523200 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "9b04f283-988b-464e-9163-4c6f1f030893" (UID: "9b04f283-988b-464e-9163-4c6f1f030893"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.524708 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b04f283-988b-464e-9163-4c6f1f030893-kube-api-access-q5c9d" (OuterVolumeSpecName: "kube-api-access-q5c9d") pod "9b04f283-988b-464e-9163-4c6f1f030893" (UID: "9b04f283-988b-464e-9163-4c6f1f030893"). InnerVolumeSpecName "kube-api-access-q5c9d". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.533168 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "9b04f283-988b-464e-9163-4c6f1f030893" (UID: "9b04f283-988b-464e-9163-4c6f1f030893"). InnerVolumeSpecName "local-storage08-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.544747 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-d4zk4"] Feb 01 07:41:16 crc kubenswrapper[4650]: W0201 07:41:16.561370 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d6a29ee_be36_4454_bf92_6dfffd45687b.slice/crio-1105e710fca54cc120195610252154eff4e55e46ce3e0055cd8090cb6a4e1bbf WatchSource:0}: Error finding container 1105e710fca54cc120195610252154eff4e55e46ce3e0055cd8090cb6a4e1bbf: Status 404 returned error can't find the container with id 1105e710fca54cc120195610252154eff4e55e46ce3e0055cd8090cb6a4e1bbf Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.561484 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b04f283-988b-464e-9163-4c6f1f030893" (UID: "9b04f283-988b-464e-9163-4c6f1f030893"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.616081 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.616109 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.619295 4650 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.619340 4650 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.619350 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b04f283-988b-464e-9163-4c6f1f030893-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.619358 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5c9d\" (UniqueName: \"kubernetes.io/projected/9b04f283-988b-464e-9163-4c6f1f030893-kube-api-access-q5c9d\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.628142 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-kzjnq"] Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.649406 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-84j7d"] Feb 01 07:41:16 crc kubenswrapper[4650]: W0201 07:41:16.657867 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddfb53357_604b_407e_8577_36288efeda68.slice/crio-7561426deccf293624b1ba32a6098facf72a6a7c9a2a449385e151c05a5e34ac WatchSource:0}: Error finding container 7561426deccf293624b1ba32a6098facf72a6a7c9a2a449385e151c05a5e34ac: Status 404 
returned error can't find the container with id 7561426deccf293624b1ba32a6098facf72a6a7c9a2a449385e151c05a5e34ac Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.666924 4650 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.687033 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-695d6f76c-qccxs"] Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.713607 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qstsn"] Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.721394 4650 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:16 crc kubenswrapper[4650]: I0201 07:41:16.727078 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.111208 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.138197 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-695d6f76c-qccxs"] Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.191337 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5db79774bc-nc4br"] Feb 01 07:41:17 crc kubenswrapper[4650]: E0201 07:41:17.191699 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df2a9e33-1389-4213-a5b7-7d749e523079" containerName="init" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.191717 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="df2a9e33-1389-4213-a5b7-7d749e523079" containerName="init" Feb 01 07:41:17 crc kubenswrapper[4650]: E0201 07:41:17.191730 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df2a9e33-1389-4213-a5b7-7d749e523079" containerName="dnsmasq-dns" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.191737 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="df2a9e33-1389-4213-a5b7-7d749e523079" containerName="dnsmasq-dns" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.191902 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="df2a9e33-1389-4213-a5b7-7d749e523079" containerName="dnsmasq-dns" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.198287 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.226813 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-66d9bfcd5-6jkbm"] Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.246978 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5db79774bc-nc4br"] Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.339581 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/299a9a2f-8631-455d-8e97-dfc5e8a17734-config-data\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.339630 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpmw2\" (UniqueName: \"kubernetes.io/projected/299a9a2f-8631-455d-8e97-dfc5e8a17734-kube-api-access-lpmw2\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.339698 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/299a9a2f-8631-455d-8e97-dfc5e8a17734-horizon-secret-key\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.339753 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/299a9a2f-8631-455d-8e97-dfc5e8a17734-logs\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.339790 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/299a9a2f-8631-455d-8e97-dfc5e8a17734-scripts\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.344879 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.357622 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.370609 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-d4zk4" event={"ID":"9d6a29ee-be36-4454-bf92-6dfffd45687b","Type":"ContainerStarted","Data":"1105e710fca54cc120195610252154eff4e55e46ce3e0055cd8090cb6a4e1bbf"} Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.391821 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-5gv78" event={"ID":"16da8114-b11c-449a-8cf7-17c1980cdcf7","Type":"ContainerStarted","Data":"6c4f59f47f499c833fd050a42711bc0cd8616de68ce9a799bc57cf737b8ca09b"} Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.422222 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-695d6f76c-qccxs" event={"ID":"f3f97afc-40d4-4fc4-be00-1280202c0a31","Type":"ContainerStarted","Data":"8f052fd8c8f557cd394003259687d65a2bfc7c8955f6c7876aecfcf9bfeb8dab"} Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.437269 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-5gv78" podStartSLOduration=3.437248191 podStartE2EDuration="3.437248191s" podCreationTimestamp="2026-02-01 07:41:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:17.416683849 +0000 UTC m=+1076.139782104" watchObservedRunningTime="2026-02-01 07:41:17.437248191 +0000 UTC m=+1076.160346436" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.441444 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qstsn" event={"ID":"1530bd4b-d35a-42ac-b85f-88d790abf462","Type":"ContainerStarted","Data":"32ad1182027a79711c7b05b9e87b965568175662fce9da229bc4f893ac741beb"} Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.441483 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qstsn" event={"ID":"1530bd4b-d35a-42ac-b85f-88d790abf462","Type":"ContainerStarted","Data":"75520b2e5ff2a8458f00d3deb2b29a59f06846f85e8d464fcb7f8429fb7d4275"} Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.442834 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-ovsdbserver-nb\") pod \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.442877 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9hqp\" (UniqueName: \"kubernetes.io/projected/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-kube-api-access-n9hqp\") pod \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.442914 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-ovsdbserver-sb\") pod \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.442932 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-config\") pod \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\" (UID: 
\"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.442982 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-dns-svc\") pod \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\" (UID: \"69068da6-b9ac-48bc-a3ce-3d9a04b47efb\") " Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.443197 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/299a9a2f-8631-455d-8e97-dfc5e8a17734-logs\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.443237 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/299a9a2f-8631-455d-8e97-dfc5e8a17734-scripts\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.443274 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/299a9a2f-8631-455d-8e97-dfc5e8a17734-config-data\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.443296 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpmw2\" (UniqueName: \"kubernetes.io/projected/299a9a2f-8631-455d-8e97-dfc5e8a17734-kube-api-access-lpmw2\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.443346 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/299a9a2f-8631-455d-8e97-dfc5e8a17734-horizon-secret-key\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.446920 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/299a9a2f-8631-455d-8e97-dfc5e8a17734-logs\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.463006 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/299a9a2f-8631-455d-8e97-dfc5e8a17734-scripts\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.465301 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/299a9a2f-8631-455d-8e97-dfc5e8a17734-horizon-secret-key\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.471445 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-kzjnq" 
event={"ID":"2208b1dc-dbac-498a-a760-21257b722e80","Type":"ContainerStarted","Data":"dbb2991e778e7379c7450119910b74520c3806948faea3525a6bd5408f81559e"} Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.471683 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-kube-api-access-n9hqp" (OuterVolumeSpecName: "kube-api-access-n9hqp") pod "69068da6-b9ac-48bc-a3ce-3d9a04b47efb" (UID: "69068da6-b9ac-48bc-a3ce-3d9a04b47efb"). InnerVolumeSpecName "kube-api-access-n9hqp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.472383 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/299a9a2f-8631-455d-8e97-dfc5e8a17734-config-data\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.504431 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" event={"ID":"69068da6-b9ac-48bc-a3ce-3d9a04b47efb","Type":"ContainerDied","Data":"10142298ebdc6eee34095e419ac5cdb205b5d5c9908a75232bf2e7b53c8789ea"} Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.504492 4650 scope.go:117] "RemoveContainer" containerID="6ec2293424e15e987a0c392071a5867eff43ef650fb2481d3c45b14acafadc9f" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.504683 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d5679f497-xbn7z" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.514434 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66d9bfcd5-6jkbm" event={"ID":"46e1952e-615d-42f3-891c-d5a6b7cbd50a","Type":"ContainerStarted","Data":"8554b8fcc9bbe19e56bbac95d5f08cbd3e8049f3450e15e2193ca81a7ae83481"} Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.527717 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-84j7d" event={"ID":"dfb53357-604b-407e-8577-36288efeda68","Type":"ContainerStarted","Data":"7561426deccf293624b1ba32a6098facf72a6a7c9a2a449385e151c05a5e34ac"} Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.530423 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpmw2\" (UniqueName: \"kubernetes.io/projected/299a9a2f-8631-455d-8e97-dfc5e8a17734-kube-api-access-lpmw2\") pod \"horizon-5db79774bc-nc4br\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.531148 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "69068da6-b9ac-48bc-a3ce-3d9a04b47efb" (UID: "69068da6-b9ac-48bc-a3ce-3d9a04b47efb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.543431 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.545560 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b243d67e-b432-4b66-aa65-05cdbc100cb7","Type":"ContainerStarted","Data":"b63abc3cf3d3d58eca7d271f11cbe4456a1e1d164b6eb75a5b49953f36e03ae8"} Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.547977 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.547998 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9hqp\" (UniqueName: \"kubernetes.io/projected/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-kube-api-access-n9hqp\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.550360 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.552336 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "69068da6-b9ac-48bc-a3ce-3d9a04b47efb" (UID: "69068da6-b9ac-48bc-a3ce-3d9a04b47efb"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.567463 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.568359 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "69068da6-b9ac-48bc-a3ce-3d9a04b47efb" (UID: "69068da6-b9ac-48bc-a3ce-3d9a04b47efb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.584190 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-config" (OuterVolumeSpecName: "config") pod "69068da6-b9ac-48bc-a3ce-3d9a04b47efb" (UID: "69068da6-b9ac-48bc-a3ce-3d9a04b47efb"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.586794 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-qstsn" podStartSLOduration=4.586759833 podStartE2EDuration="4.586759833s" podCreationTimestamp="2026-02-01 07:41:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:17.510579165 +0000 UTC m=+1076.233677410" watchObservedRunningTime="2026-02-01 07:41:17.586759833 +0000 UTC m=+1076.309858078" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.650941 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.650967 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.650977 4650 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69068da6-b9ac-48bc-a3ce-3d9a04b47efb-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.817713 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.845772 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.913677 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:17 crc kubenswrapper[4650]: E0201 07:41:17.914109 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69068da6-b9ac-48bc-a3ce-3d9a04b47efb" containerName="init" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.914123 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="69068da6-b9ac-48bc-a3ce-3d9a04b47efb" containerName="init" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.916616 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="69068da6-b9ac-48bc-a3ce-3d9a04b47efb" containerName="init" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.919926 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.929898 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.930106 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.950766 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.959950 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/97945bc6-4707-42b5-ace3-113abc710e3e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.959998 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.960017 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.960072 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.960088 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9872m\" (UniqueName: \"kubernetes.io/projected/97945bc6-4707-42b5-ace3-113abc710e3e-kube-api-access-9872m\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.960162 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.960181 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97945bc6-4707-42b5-ace3-113abc710e3e-logs\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:17 crc kubenswrapper[4650]: I0201 07:41:17.960235 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:17.990921 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b04f283-988b-464e-9163-4c6f1f030893" path="/var/lib/kubelet/pods/9b04f283-988b-464e-9163-4c6f1f030893/volumes" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.022150 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d5679f497-xbn7z"] Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.061901 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.061956 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/97945bc6-4707-42b5-ace3-113abc710e3e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.061993 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.062007 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.062059 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.062077 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9872m\" (UniqueName: \"kubernetes.io/projected/97945bc6-4707-42b5-ace3-113abc710e3e-kube-api-access-9872m\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.062136 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.062152 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97945bc6-4707-42b5-ace3-113abc710e3e-logs\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.062601 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97945bc6-4707-42b5-ace3-113abc710e3e-logs\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.062856 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.065404 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/97945bc6-4707-42b5-ace3-113abc710e3e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.075086 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7d5679f497-xbn7z"] Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.090233 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.098814 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9872m\" (UniqueName: \"kubernetes.io/projected/97945bc6-4707-42b5-ace3-113abc710e3e-kube-api-access-9872m\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.110868 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.112189 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.122332 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.129869 4650 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.269191 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.573961 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1fea4231-c72c-43db-b1d8-ee3a755df1b4","Type":"ContainerStarted","Data":"01c13b524b48147c95fb9da2f9875094c7109d5a96ec3cf373de125c470900af"} Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.600257 4650 generic.go:334] "Generic (PLEG): container finished" podID="dfb53357-604b-407e-8577-36288efeda68" containerID="f57ce8578ac28782d75e21e796d1f228046a9fe8f81dc9ea69507661b03c5a1d" exitCode=0 Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.601776 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-84j7d" event={"ID":"dfb53357-604b-407e-8577-36288efeda68","Type":"ContainerDied","Data":"f57ce8578ac28782d75e21e796d1f228046a9fe8f81dc9ea69507661b03c5a1d"} Feb 01 07:41:18 crc kubenswrapper[4650]: I0201 07:41:18.745666 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5db79774bc-nc4br"] Feb 01 07:41:19 crc kubenswrapper[4650]: W0201 07:41:19.248763 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod97945bc6_4707_42b5_ace3_113abc710e3e.slice/crio-2f62a1cce6a20e84531d4aa2737bd09c0fa99cf64e4ceef5a322b1d50e134c0d WatchSource:0}: Error finding container 2f62a1cce6a20e84531d4aa2737bd09c0fa99cf64e4ceef5a322b1d50e134c0d: Status 404 returned error can't find the container with id 2f62a1cce6a20e84531d4aa2737bd09c0fa99cf64e4ceef5a322b1d50e134c0d Feb 01 07:41:19 crc kubenswrapper[4650]: I0201 07:41:19.253923 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:19 crc kubenswrapper[4650]: I0201 07:41:19.621144 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1fea4231-c72c-43db-b1d8-ee3a755df1b4","Type":"ContainerStarted","Data":"58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94"} Feb 01 07:41:19 crc kubenswrapper[4650]: I0201 07:41:19.635420 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-84j7d" event={"ID":"dfb53357-604b-407e-8577-36288efeda68","Type":"ContainerStarted","Data":"fe03c09d32473e68729d1713a4e64b958a1db007ebdfe252ecc081294bbbf6b0"} Feb 01 07:41:19 crc kubenswrapper[4650]: I0201 07:41:19.636002 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:19 crc kubenswrapper[4650]: I0201 07:41:19.641002 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5db79774bc-nc4br" event={"ID":"299a9a2f-8631-455d-8e97-dfc5e8a17734","Type":"ContainerStarted","Data":"4ffde89b368c78683d95328b4194957cefea15af052cf56d8f1f4244cf895ec6"} Feb 01 07:41:19 crc kubenswrapper[4650]: I0201 07:41:19.646835 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"97945bc6-4707-42b5-ace3-113abc710e3e","Type":"ContainerStarted","Data":"2f62a1cce6a20e84531d4aa2737bd09c0fa99cf64e4ceef5a322b1d50e134c0d"} Feb 01 07:41:19 crc kubenswrapper[4650]: I0201 07:41:19.658202 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56798b757f-84j7d" podStartSLOduration=5.658185911 podStartE2EDuration="5.658185911s" podCreationTimestamp="2026-02-01 07:41:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:19.653289982 +0000 UTC m=+1078.376388227" watchObservedRunningTime="2026-02-01 07:41:19.658185911 +0000 UTC m=+1078.381284156" Feb 01 07:41:19 crc kubenswrapper[4650]: I0201 07:41:19.981223 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69068da6-b9ac-48bc-a3ce-3d9a04b47efb" path="/var/lib/kubelet/pods/69068da6-b9ac-48bc-a3ce-3d9a04b47efb/volumes" Feb 01 07:41:20 crc kubenswrapper[4650]: I0201 07:41:20.668927 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1fea4231-c72c-43db-b1d8-ee3a755df1b4","Type":"ContainerStarted","Data":"5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629"} Feb 01 07:41:20 crc kubenswrapper[4650]: I0201 07:41:20.670601 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="1fea4231-c72c-43db-b1d8-ee3a755df1b4" containerName="glance-log" containerID="cri-o://58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94" gracePeriod=30 Feb 01 07:41:20 crc kubenswrapper[4650]: I0201 07:41:20.671168 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="1fea4231-c72c-43db-b1d8-ee3a755df1b4" containerName="glance-httpd" containerID="cri-o://5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629" gracePeriod=30 Feb 01 07:41:20 crc kubenswrapper[4650]: I0201 07:41:20.683636 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97945bc6-4707-42b5-ace3-113abc710e3e","Type":"ContainerStarted","Data":"79076c939504f1325536395e93ee670f57352524c1972b4dee9b8d73d9659a73"} Feb 01 07:41:20 crc kubenswrapper[4650]: I0201 07:41:20.722248 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.722227152 podStartE2EDuration="6.722227152s" podCreationTimestamp="2026-02-01 07:41:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:20.706520338 +0000 UTC m=+1079.429618593" watchObservedRunningTime="2026-02-01 07:41:20.722227152 +0000 UTC m=+1079.445325397" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.514519 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.679901 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1fea4231-c72c-43db-b1d8-ee3a755df1b4-logs\") pod \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.679951 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1fea4231-c72c-43db-b1d8-ee3a755df1b4-httpd-run\") pod \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.680042 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-public-tls-certs\") pod \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.680076 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58lfv\" (UniqueName: \"kubernetes.io/projected/1fea4231-c72c-43db-b1d8-ee3a755df1b4-kube-api-access-58lfv\") pod \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.680125 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-combined-ca-bundle\") pod \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.680166 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.680185 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-scripts\") pod \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.680280 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-config-data\") pod \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.680460 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1fea4231-c72c-43db-b1d8-ee3a755df1b4-logs" (OuterVolumeSpecName: "logs") pod "1fea4231-c72c-43db-b1d8-ee3a755df1b4" (UID: "1fea4231-c72c-43db-b1d8-ee3a755df1b4"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.680767 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1fea4231-c72c-43db-b1d8-ee3a755df1b4-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.683004 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1fea4231-c72c-43db-b1d8-ee3a755df1b4-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "1fea4231-c72c-43db-b1d8-ee3a755df1b4" (UID: "1fea4231-c72c-43db-b1d8-ee3a755df1b4"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.690622 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "1fea4231-c72c-43db-b1d8-ee3a755df1b4" (UID: "1fea4231-c72c-43db-b1d8-ee3a755df1b4"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.690826 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fea4231-c72c-43db-b1d8-ee3a755df1b4-kube-api-access-58lfv" (OuterVolumeSpecName: "kube-api-access-58lfv") pod "1fea4231-c72c-43db-b1d8-ee3a755df1b4" (UID: "1fea4231-c72c-43db-b1d8-ee3a755df1b4"). InnerVolumeSpecName "kube-api-access-58lfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.714300 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-scripts" (OuterVolumeSpecName: "scripts") pod "1fea4231-c72c-43db-b1d8-ee3a755df1b4" (UID: "1fea4231-c72c-43db-b1d8-ee3a755df1b4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.726549 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1fea4231-c72c-43db-b1d8-ee3a755df1b4" (UID: "1fea4231-c72c-43db-b1d8-ee3a755df1b4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.743040 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.742954 4650 generic.go:334] "Generic (PLEG): container finished" podID="1fea4231-c72c-43db-b1d8-ee3a755df1b4" containerID="5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629" exitCode=143 Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.743113 4650 generic.go:334] "Generic (PLEG): container finished" podID="1fea4231-c72c-43db-b1d8-ee3a755df1b4" containerID="58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94" exitCode=143 Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.743134 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1fea4231-c72c-43db-b1d8-ee3a755df1b4","Type":"ContainerDied","Data":"5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629"} Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.743161 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1fea4231-c72c-43db-b1d8-ee3a755df1b4","Type":"ContainerDied","Data":"58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94"} Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.743171 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1fea4231-c72c-43db-b1d8-ee3a755df1b4","Type":"ContainerDied","Data":"01c13b524b48147c95fb9da2f9875094c7109d5a96ec3cf373de125c470900af"} Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.743987 4650 scope.go:117] "RemoveContainer" containerID="5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.761244 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "1fea4231-c72c-43db-b1d8-ee3a755df1b4" (UID: "1fea4231-c72c-43db-b1d8-ee3a755df1b4"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.780948 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-config-data" (OuterVolumeSpecName: "config-data") pod "1fea4231-c72c-43db-b1d8-ee3a755df1b4" (UID: "1fea4231-c72c-43db-b1d8-ee3a755df1b4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.782454 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-config-data\") pod \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\" (UID: \"1fea4231-c72c-43db-b1d8-ee3a755df1b4\") " Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.783604 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.783630 4650 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1fea4231-c72c-43db-b1d8-ee3a755df1b4-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.783644 4650 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.783658 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58lfv\" (UniqueName: \"kubernetes.io/projected/1fea4231-c72c-43db-b1d8-ee3a755df1b4-kube-api-access-58lfv\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.783669 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.783688 4650 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Feb 01 07:41:21 crc kubenswrapper[4650]: W0201 07:41:21.784426 4650 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/1fea4231-c72c-43db-b1d8-ee3a755df1b4/volumes/kubernetes.io~secret/config-data Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.784453 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-config-data" (OuterVolumeSpecName: "config-data") pod "1fea4231-c72c-43db-b1d8-ee3a755df1b4" (UID: "1fea4231-c72c-43db-b1d8-ee3a755df1b4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.810291 4650 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.865951 4650 scope.go:117] "RemoveContainer" containerID="58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.886148 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fea4231-c72c-43db-b1d8-ee3a755df1b4-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.886188 4650 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.914019 4650 scope.go:117] "RemoveContainer" containerID="5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629" Feb 01 07:41:21 crc kubenswrapper[4650]: E0201 07:41:21.915914 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629\": container with ID starting with 5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629 not found: ID does not exist" containerID="5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.915957 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629"} err="failed to get container status \"5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629\": rpc error: code = NotFound desc = could not find container \"5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629\": container with ID starting with 5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629 not found: ID does not exist" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.916000 4650 scope.go:117] "RemoveContainer" containerID="58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94" Feb 01 07:41:21 crc kubenswrapper[4650]: E0201 07:41:21.917624 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94\": container with ID starting with 58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94 not found: ID does not exist" containerID="58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.917650 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94"} err="failed to get container status \"58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94\": rpc error: code = NotFound desc = could not find container \"58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94\": container with ID starting with 58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94 not found: ID does not exist" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.917667 4650 scope.go:117] "RemoveContainer" 
containerID="5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.917980 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629"} err="failed to get container status \"5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629\": rpc error: code = NotFound desc = could not find container \"5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629\": container with ID starting with 5344b47132af4a8c2c8da92ae49c8e72b2f4f38410a5ae2ee48d6ecec67ef629 not found: ID does not exist" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.918057 4650 scope.go:117] "RemoveContainer" containerID="58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94" Feb 01 07:41:21 crc kubenswrapper[4650]: I0201 07:41:21.918738 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94"} err="failed to get container status \"58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94\": rpc error: code = NotFound desc = could not find container \"58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94\": container with ID starting with 58a14f8f067dc879f0656064b5ee982a71cf7a8c8033dc3b34cace57802f1b94 not found: ID does not exist" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.040607 4650 scope.go:117] "RemoveContainer" containerID="d2f715a393c524210c739be45a6809ad759184af4ae9bdac9460496004b7b00d" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.040988 4650 scope.go:117] "RemoveContainer" containerID="0fb0fa378a10d2f58c054ac5c1759a8c5f3b3807d985594164a305e6defd887a" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.041121 4650 scope.go:117] "RemoveContainer" containerID="b4fe5ae45d159834a9c667093e751e227a564d39ec95f64cc51d8b99cc229ac8" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.275980 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.288578 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.296244 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:22 crc kubenswrapper[4650]: E0201 07:41:22.296623 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fea4231-c72c-43db-b1d8-ee3a755df1b4" containerName="glance-log" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.296644 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fea4231-c72c-43db-b1d8-ee3a755df1b4" containerName="glance-log" Feb 01 07:41:22 crc kubenswrapper[4650]: E0201 07:41:22.296666 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fea4231-c72c-43db-b1d8-ee3a755df1b4" containerName="glance-httpd" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.296681 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fea4231-c72c-43db-b1d8-ee3a755df1b4" containerName="glance-httpd" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.296850 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fea4231-c72c-43db-b1d8-ee3a755df1b4" containerName="glance-httpd" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.296863 4650 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="1fea4231-c72c-43db-b1d8-ee3a755df1b4" containerName="glance-log" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.297947 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.300642 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.300902 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.334837 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.404129 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.404195 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.404216 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9glhx\" (UniqueName: \"kubernetes.io/projected/a569cd99-6b07-46d7-b2c9-ef80aa27976e-kube-api-access-9glhx\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.404278 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-scripts\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.404303 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.404369 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a569cd99-6b07-46d7-b2c9-ef80aa27976e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.404386 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a569cd99-6b07-46d7-b2c9-ef80aa27976e-logs\") pod \"glance-default-external-api-0\" (UID: 
\"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.404414 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-config-data\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.505502 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a569cd99-6b07-46d7-b2c9-ef80aa27976e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.505549 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a569cd99-6b07-46d7-b2c9-ef80aa27976e-logs\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.505590 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-config-data\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.505631 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.505658 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.505683 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9glhx\" (UniqueName: \"kubernetes.io/projected/a569cd99-6b07-46d7-b2c9-ef80aa27976e-kube-api-access-9glhx\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.505756 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-scripts\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.505789 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " 
pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.507753 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a569cd99-6b07-46d7-b2c9-ef80aa27976e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.508082 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a569cd99-6b07-46d7-b2c9-ef80aa27976e-logs\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.509194 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.513884 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.515049 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-config-data\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.519514 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-scripts\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.522892 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9glhx\" (UniqueName: \"kubernetes.io/projected/a569cd99-6b07-46d7-b2c9-ef80aa27976e-kube-api-access-9glhx\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.523371 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.544214 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.622351 4650 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.815356 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"6504b725df1bfeb92243f472b8d0d3567369670c116e0b52078bea24ff64fa18"} Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.818630 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97945bc6-4707-42b5-ace3-113abc710e3e","Type":"ContainerStarted","Data":"a44f7992069ad5f19bb0410daa9f5292e190e85f2a7f8d50f7b82ee5493e1454"} Feb 01 07:41:22 crc kubenswrapper[4650]: I0201 07:41:22.854376 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.854358811 podStartE2EDuration="5.854358811s" podCreationTimestamp="2026-02-01 07:41:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:22.850735846 +0000 UTC m=+1081.573834091" watchObservedRunningTime="2026-02-01 07:41:22.854358811 +0000 UTC m=+1081.577457046" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.391363 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.603382 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-66d9bfcd5-6jkbm"] Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.653899 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5b4d45c6bd-qsdbt"] Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.655201 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.670357 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.689798 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5b4d45c6bd-qsdbt"] Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.736110 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5db79774bc-nc4br"] Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.742060 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e572f25-ea86-45a7-b828-214b813f9d0c-logs\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.742277 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-horizon-tls-certs\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.742336 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e572f25-ea86-45a7-b828-214b813f9d0c-scripts\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.742462 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-horizon-secret-key\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.742578 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e572f25-ea86-45a7-b828-214b813f9d0c-config-data\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.742614 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-combined-ca-bundle\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.742661 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xs5js\" (UniqueName: \"kubernetes.io/projected/7e572f25-ea86-45a7-b828-214b813f9d0c-kube-api-access-xs5js\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.793084 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 
07:41:23.806834 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-79fd8b5f84-qg9cv"] Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.811205 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850314 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-horizon-secret-key\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850385 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c4bad14-279f-4212-a86d-cea1c9fe7b48-horizon-tls-certs\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850420 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c4bad14-279f-4212-a86d-cea1c9fe7b48-config-data\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850460 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xr4gb\" (UniqueName: \"kubernetes.io/projected/9c4bad14-279f-4212-a86d-cea1c9fe7b48-kube-api-access-xr4gb\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850480 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c4bad14-279f-4212-a86d-cea1c9fe7b48-logs\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850512 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e572f25-ea86-45a7-b828-214b813f9d0c-config-data\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850536 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9c4bad14-279f-4212-a86d-cea1c9fe7b48-horizon-secret-key\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850555 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-combined-ca-bundle\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850576 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9c4bad14-279f-4212-a86d-cea1c9fe7b48-scripts\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850599 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xs5js\" (UniqueName: \"kubernetes.io/projected/7e572f25-ea86-45a7-b828-214b813f9d0c-kube-api-access-xs5js\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850642 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e572f25-ea86-45a7-b828-214b813f9d0c-logs\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850682 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-horizon-tls-certs\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850707 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e572f25-ea86-45a7-b828-214b813f9d0c-scripts\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.850750 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c4bad14-279f-4212-a86d-cea1c9fe7b48-combined-ca-bundle\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.874227 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e572f25-ea86-45a7-b828-214b813f9d0c-logs\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.875164 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e572f25-ea86-45a7-b828-214b813f9d0c-scripts\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.879740 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e572f25-ea86-45a7-b828-214b813f9d0c-config-data\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.895050 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-79fd8b5f84-qg9cv"] Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.901849 4650 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-combined-ca-bundle\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.915345 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xs5js\" (UniqueName: \"kubernetes.io/projected/7e572f25-ea86-45a7-b828-214b813f9d0c-kube-api-access-xs5js\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.922546 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.936362 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-horizon-secret-key\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.936888 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-horizon-tls-certs\") pod \"horizon-5b4d45c6bd-qsdbt\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.939194 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="8c262c59779118f7852a08d8e244dfa36953485fce537dcd86580dc10cd0ba0b" exitCode=1 Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.939229 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="6504b725df1bfeb92243f472b8d0d3567369670c116e0b52078bea24ff64fa18" exitCode=1 Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.939306 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"8c262c59779118f7852a08d8e244dfa36953485fce537dcd86580dc10cd0ba0b"} Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.939332 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"6504b725df1bfeb92243f472b8d0d3567369670c116e0b52078bea24ff64fa18"} Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.939344 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"b10079ec0287249a789ed330b62eed00b2c8d97c8f8b7f0b73fdae340eb33595"} Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.939362 4650 scope.go:117] "RemoveContainer" containerID="0fb0fa378a10d2f58c054ac5c1759a8c5f3b3807d985594164a305e6defd887a" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.954399 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c4bad14-279f-4212-a86d-cea1c9fe7b48-horizon-tls-certs\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " 
pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.963112 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c4bad14-279f-4212-a86d-cea1c9fe7b48-config-data\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.963241 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xr4gb\" (UniqueName: \"kubernetes.io/projected/9c4bad14-279f-4212-a86d-cea1c9fe7b48-kube-api-access-xr4gb\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.963336 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c4bad14-279f-4212-a86d-cea1c9fe7b48-logs\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.963452 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9c4bad14-279f-4212-a86d-cea1c9fe7b48-horizon-secret-key\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.963535 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9c4bad14-279f-4212-a86d-cea1c9fe7b48-scripts\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.963769 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c4bad14-279f-4212-a86d-cea1c9fe7b48-combined-ca-bundle\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.962809 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a569cd99-6b07-46d7-b2c9-ef80aa27976e","Type":"ContainerStarted","Data":"76195e72f53686f389803c79ef4985ffbc5f9354f481285e90c4c663f17e726e"} Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.960399 4650 scope.go:117] "RemoveContainer" containerID="6504b725df1bfeb92243f472b8d0d3567369670c116e0b52078bea24ff64fa18" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.969497 4650 scope.go:117] "RemoveContainer" containerID="8c262c59779118f7852a08d8e244dfa36953485fce537dcd86580dc10cd0ba0b" Feb 01 07:41:23 crc kubenswrapper[4650]: E0201 07:41:23.970003 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" 
pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.970802 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c4bad14-279f-4212-a86d-cea1c9fe7b48-logs\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.971729 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c4bad14-279f-4212-a86d-cea1c9fe7b48-config-data\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:23 crc kubenswrapper[4650]: I0201 07:41:23.980143 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9c4bad14-279f-4212-a86d-cea1c9fe7b48-scripts\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:24 crc kubenswrapper[4650]: I0201 07:41:24.007840 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xr4gb\" (UniqueName: \"kubernetes.io/projected/9c4bad14-279f-4212-a86d-cea1c9fe7b48-kube-api-access-xr4gb\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:24 crc kubenswrapper[4650]: I0201 07:41:24.015554 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:41:24 crc kubenswrapper[4650]: I0201 07:41:24.017095 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c4bad14-279f-4212-a86d-cea1c9fe7b48-combined-ca-bundle\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:24 crc kubenswrapper[4650]: I0201 07:41:24.036557 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fea4231-c72c-43db-b1d8-ee3a755df1b4" path="/var/lib/kubelet/pods/1fea4231-c72c-43db-b1d8-ee3a755df1b4/volumes" Feb 01 07:41:24 crc kubenswrapper[4650]: I0201 07:41:24.046919 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c4bad14-279f-4212-a86d-cea1c9fe7b48-horizon-tls-certs\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:24 crc kubenswrapper[4650]: I0201 07:41:24.050477 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9c4bad14-279f-4212-a86d-cea1c9fe7b48-horizon-secret-key\") pod \"horizon-79fd8b5f84-qg9cv\" (UID: \"9c4bad14-279f-4212-a86d-cea1c9fe7b48\") " pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:24 crc kubenswrapper[4650]: I0201 07:41:24.133463 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:41:24 crc kubenswrapper[4650]: I0201 07:41:24.973626 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="97945bc6-4707-42b5-ace3-113abc710e3e" containerName="glance-log" containerID="cri-o://79076c939504f1325536395e93ee670f57352524c1972b4dee9b8d73d9659a73" gracePeriod=30 Feb 01 07:41:24 crc kubenswrapper[4650]: I0201 07:41:24.974542 4650 scope.go:117] "RemoveContainer" containerID="6504b725df1bfeb92243f472b8d0d3567369670c116e0b52078bea24ff64fa18" Feb 01 07:41:24 crc kubenswrapper[4650]: I0201 07:41:24.974695 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="97945bc6-4707-42b5-ace3-113abc710e3e" containerName="glance-httpd" containerID="cri-o://a44f7992069ad5f19bb0410daa9f5292e190e85f2a7f8d50f7b82ee5493e1454" gracePeriod=30 Feb 01 07:41:24 crc kubenswrapper[4650]: I0201 07:41:24.974843 4650 scope.go:117] "RemoveContainer" containerID="8c262c59779118f7852a08d8e244dfa36953485fce537dcd86580dc10cd0ba0b" Feb 01 07:41:24 crc kubenswrapper[4650]: E0201 07:41:24.975316 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:41:24 crc kubenswrapper[4650]: I0201 07:41:24.999473 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:41:25 crc kubenswrapper[4650]: I0201 07:41:25.136693 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-hjbmr"] Feb 01 07:41:25 crc kubenswrapper[4650]: I0201 07:41:25.141061 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" podUID="73d8cddc-9598-4160-821f-9f2a594b9eb4" containerName="dnsmasq-dns" containerID="cri-o://7328dc1c02bb9990f7c8772431212dc76e5892644429a10b8063d3eee2081556" gracePeriod=10 Feb 01 07:41:25 crc kubenswrapper[4650]: I0201 07:41:25.999573 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="b10079ec0287249a789ed330b62eed00b2c8d97c8f8b7f0b73fdae340eb33595" exitCode=1 Feb 01 07:41:26 crc kubenswrapper[4650]: I0201 07:41:25.999635 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"b10079ec0287249a789ed330b62eed00b2c8d97c8f8b7f0b73fdae340eb33595"} Feb 01 07:41:26 crc kubenswrapper[4650]: I0201 07:41:26.001104 4650 scope.go:117] "RemoveContainer" containerID="6504b725df1bfeb92243f472b8d0d3567369670c116e0b52078bea24ff64fa18" Feb 01 07:41:26 crc kubenswrapper[4650]: I0201 07:41:26.001188 4650 scope.go:117] "RemoveContainer" containerID="8c262c59779118f7852a08d8e244dfa36953485fce537dcd86580dc10cd0ba0b" Feb 01 07:41:26 crc kubenswrapper[4650]: I0201 07:41:26.001274 4650 scope.go:117] "RemoveContainer" 
containerID="b10079ec0287249a789ed330b62eed00b2c8d97c8f8b7f0b73fdae340eb33595" Feb 01 07:41:26 crc kubenswrapper[4650]: E0201 07:41:26.001564 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:41:26 crc kubenswrapper[4650]: I0201 07:41:26.011188 4650 generic.go:334] "Generic (PLEG): container finished" podID="97945bc6-4707-42b5-ace3-113abc710e3e" containerID="a44f7992069ad5f19bb0410daa9f5292e190e85f2a7f8d50f7b82ee5493e1454" exitCode=0 Feb 01 07:41:26 crc kubenswrapper[4650]: I0201 07:41:26.011221 4650 generic.go:334] "Generic (PLEG): container finished" podID="97945bc6-4707-42b5-ace3-113abc710e3e" containerID="79076c939504f1325536395e93ee670f57352524c1972b4dee9b8d73d9659a73" exitCode=143 Feb 01 07:41:26 crc kubenswrapper[4650]: I0201 07:41:26.011255 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97945bc6-4707-42b5-ace3-113abc710e3e","Type":"ContainerDied","Data":"a44f7992069ad5f19bb0410daa9f5292e190e85f2a7f8d50f7b82ee5493e1454"} Feb 01 07:41:26 crc kubenswrapper[4650]: I0201 07:41:26.011290 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97945bc6-4707-42b5-ace3-113abc710e3e","Type":"ContainerDied","Data":"79076c939504f1325536395e93ee670f57352524c1972b4dee9b8d73d9659a73"} Feb 01 07:41:26 crc kubenswrapper[4650]: I0201 07:41:26.018094 4650 generic.go:334] "Generic (PLEG): container finished" podID="1530bd4b-d35a-42ac-b85f-88d790abf462" containerID="32ad1182027a79711c7b05b9e87b965568175662fce9da229bc4f893ac741beb" exitCode=0 Feb 01 07:41:26 crc kubenswrapper[4650]: I0201 07:41:26.018155 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qstsn" event={"ID":"1530bd4b-d35a-42ac-b85f-88d790abf462","Type":"ContainerDied","Data":"32ad1182027a79711c7b05b9e87b965568175662fce9da229bc4f893ac741beb"} Feb 01 07:41:26 crc kubenswrapper[4650]: I0201 07:41:26.022128 4650 generic.go:334] "Generic (PLEG): container finished" podID="73d8cddc-9598-4160-821f-9f2a594b9eb4" containerID="7328dc1c02bb9990f7c8772431212dc76e5892644429a10b8063d3eee2081556" exitCode=0 Feb 01 07:41:26 crc kubenswrapper[4650]: I0201 07:41:26.022199 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" event={"ID":"73d8cddc-9598-4160-821f-9f2a594b9eb4","Type":"ContainerDied","Data":"7328dc1c02bb9990f7c8772431212dc76e5892644429a10b8063d3eee2081556"} Feb 01 07:41:26 crc kubenswrapper[4650]: I0201 07:41:26.024703 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a569cd99-6b07-46d7-b2c9-ef80aa27976e","Type":"ContainerStarted","Data":"530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e"} Feb 01 07:41:27 crc kubenswrapper[4650]: I0201 
07:41:27.950886 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" podUID="73d8cddc-9598-4160-821f-9f2a594b9eb4" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: connect: connection refused" Feb 01 07:41:28 crc kubenswrapper[4650]: I0201 07:41:28.465184 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:41:28 crc kubenswrapper[4650]: E0201 07:41:28.465425 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:41:28 crc kubenswrapper[4650]: E0201 07:41:28.465472 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:42:32.465456834 +0000 UTC m=+1151.188555079 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.394602 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.486222 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cj52\" (UniqueName: \"kubernetes.io/projected/1530bd4b-d35a-42ac-b85f-88d790abf462-kube-api-access-8cj52\") pod \"1530bd4b-d35a-42ac-b85f-88d790abf462\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.486281 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-combined-ca-bundle\") pod \"1530bd4b-d35a-42ac-b85f-88d790abf462\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.486303 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-config-data\") pod \"1530bd4b-d35a-42ac-b85f-88d790abf462\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.486360 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-scripts\") pod \"1530bd4b-d35a-42ac-b85f-88d790abf462\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.486387 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-fernet-keys\") pod \"1530bd4b-d35a-42ac-b85f-88d790abf462\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.486460 4650 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-credential-keys\") pod \"1530bd4b-d35a-42ac-b85f-88d790abf462\" (UID: \"1530bd4b-d35a-42ac-b85f-88d790abf462\") " Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.504200 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "1530bd4b-d35a-42ac-b85f-88d790abf462" (UID: "1530bd4b-d35a-42ac-b85f-88d790abf462"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.504389 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1530bd4b-d35a-42ac-b85f-88d790abf462-kube-api-access-8cj52" (OuterVolumeSpecName: "kube-api-access-8cj52") pod "1530bd4b-d35a-42ac-b85f-88d790abf462" (UID: "1530bd4b-d35a-42ac-b85f-88d790abf462"). InnerVolumeSpecName "kube-api-access-8cj52". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.509889 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "1530bd4b-d35a-42ac-b85f-88d790abf462" (UID: "1530bd4b-d35a-42ac-b85f-88d790abf462"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.509966 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-scripts" (OuterVolumeSpecName: "scripts") pod "1530bd4b-d35a-42ac-b85f-88d790abf462" (UID: "1530bd4b-d35a-42ac-b85f-88d790abf462"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.519315 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-config-data" (OuterVolumeSpecName: "config-data") pod "1530bd4b-d35a-42ac-b85f-88d790abf462" (UID: "1530bd4b-d35a-42ac-b85f-88d790abf462"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.523013 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1530bd4b-d35a-42ac-b85f-88d790abf462" (UID: "1530bd4b-d35a-42ac-b85f-88d790abf462"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.589737 4650 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-credential-keys\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.589798 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cj52\" (UniqueName: \"kubernetes.io/projected/1530bd4b-d35a-42ac-b85f-88d790abf462-kube-api-access-8cj52\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.589811 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.589819 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.589828 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:29 crc kubenswrapper[4650]: I0201 07:41:29.589838 4650 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1530bd4b-d35a-42ac-b85f-88d790abf462-fernet-keys\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.087998 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qstsn" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.087760 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qstsn" event={"ID":"1530bd4b-d35a-42ac-b85f-88d790abf462","Type":"ContainerDied","Data":"75520b2e5ff2a8458f00d3deb2b29a59f06846f85e8d464fcb7f8429fb7d4275"} Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.088157 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75520b2e5ff2a8458f00d3deb2b29a59f06846f85e8d464fcb7f8429fb7d4275" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.494042 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-qstsn"] Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.501533 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-qstsn"] Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.592635 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-z4nfj"] Feb 01 07:41:30 crc kubenswrapper[4650]: E0201 07:41:30.593124 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1530bd4b-d35a-42ac-b85f-88d790abf462" containerName="keystone-bootstrap" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.593138 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="1530bd4b-d35a-42ac-b85f-88d790abf462" containerName="keystone-bootstrap" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.593392 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="1530bd4b-d35a-42ac-b85f-88d790abf462" containerName="keystone-bootstrap" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.593990 4650 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.599858 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.600393 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.600479 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.600578 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8zg69" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.601200 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.605569 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-z4nfj"] Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.718286 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdqgc\" (UniqueName: \"kubernetes.io/projected/b0b99236-33b9-4191-8139-8afbda8a3329-kube-api-access-vdqgc\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.718352 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-credential-keys\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.718410 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-fernet-keys\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.718473 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-scripts\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.718573 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-config-data\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.718600 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-combined-ca-bundle\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.820380 4650 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-config-data\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.820420 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-combined-ca-bundle\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.820503 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdqgc\" (UniqueName: \"kubernetes.io/projected/b0b99236-33b9-4191-8139-8afbda8a3329-kube-api-access-vdqgc\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.820524 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-credential-keys\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.820546 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-fernet-keys\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.820587 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-scripts\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.829002 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-combined-ca-bundle\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.830419 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-scripts\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.830950 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-fernet-keys\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.831225 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-config-data\") pod \"keystone-bootstrap-z4nfj\" (UID: 
\"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.838286 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdqgc\" (UniqueName: \"kubernetes.io/projected/b0b99236-33b9-4191-8139-8afbda8a3329-kube-api-access-vdqgc\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.838647 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-credential-keys\") pod \"keystone-bootstrap-z4nfj\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:30 crc kubenswrapper[4650]: I0201 07:41:30.917072 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:41:31 crc kubenswrapper[4650]: I0201 07:41:31.978348 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1530bd4b-d35a-42ac-b85f-88d790abf462" path="/var/lib/kubelet/pods/1530bd4b-d35a-42ac-b85f-88d790abf462/volumes" Feb 01 07:41:32 crc kubenswrapper[4650]: I0201 07:41:32.950399 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" podUID="73d8cddc-9598-4160-821f-9f2a594b9eb4" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: connect: connection refused" Feb 01 07:41:35 crc kubenswrapper[4650]: I0201 07:41:35.318090 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5b4d45c6bd-qsdbt"] Feb 01 07:41:37 crc kubenswrapper[4650]: I0201 07:41:37.161095 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:41:37 crc kubenswrapper[4650]: I0201 07:41:37.161547 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:41:37 crc kubenswrapper[4650]: E0201 07:41:37.637989 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Feb 01 07:41:37 crc kubenswrapper[4650]: E0201 07:41:37.638155 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-76h48,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-d4zk4_openstack(9d6a29ee-be36-4454-bf92-6dfffd45687b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:41:37 crc kubenswrapper[4650]: E0201 07:41:37.640839 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-d4zk4" podUID="9d6a29ee-be36-4454-bf92-6dfffd45687b" Feb 01 07:41:37 crc kubenswrapper[4650]: I0201 07:41:37.965169 4650 scope.go:117] "RemoveContainer" containerID="6504b725df1bfeb92243f472b8d0d3567369670c116e0b52078bea24ff64fa18" Feb 01 07:41:37 crc kubenswrapper[4650]: I0201 07:41:37.965320 4650 scope.go:117] "RemoveContainer" containerID="8c262c59779118f7852a08d8e244dfa36953485fce537dcd86580dc10cd0ba0b" Feb 01 07:41:37 crc kubenswrapper[4650]: I0201 07:41:37.965412 4650 scope.go:117] "RemoveContainer" containerID="b10079ec0287249a789ed330b62eed00b2c8d97c8f8b7f0b73fdae340eb33595" Feb 01 07:41:37 crc kubenswrapper[4650]: E0201 07:41:37.965788 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:41:38 crc kubenswrapper[4650]: E0201 07:41:38.164781 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-d4zk4" podUID="9d6a29ee-be36-4454-bf92-6dfffd45687b" Feb 01 07:41:39 crc kubenswrapper[4650]: E0201 07:41:39.356369 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Feb 01 07:41:39 crc kubenswrapper[4650]: E0201 07:41:39.358252 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5bfh59fh694h567h8h5ch5h7fh78h657h555h566h554h5bbh5b5h554h587h678h67bh5b7h5d9h586h9bhc8hc4h5d6h79h54fhd4h77h55fh558q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lpmw2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-5db79774bc-nc4br_openstack(299a9a2f-8631-455d-8e97-dfc5e8a17734): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:41:39 crc kubenswrapper[4650]: E0201 07:41:39.360830 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-5db79774bc-nc4br" podUID="299a9a2f-8631-455d-8e97-dfc5e8a17734" Feb 01 07:41:39 crc kubenswrapper[4650]: E0201 07:41:39.382504 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Feb 01 07:41:39 crc kubenswrapper[4650]: E0201 07:41:39.382629 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n66bh665h68dh645h54hbch554hdbh77h54h57bh697h667hdbh98h5d4h74hd9h66dh544h64h5dbh95h98h68hb7hf6hdh675h5c4h9bh644q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zxg9h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-66d9bfcd5-6jkbm_openstack(46e1952e-615d-42f3-891c-d5a6b7cbd50a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:41:39 crc kubenswrapper[4650]: E0201 07:41:39.384528 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-66d9bfcd5-6jkbm" podUID="46e1952e-615d-42f3-891c-d5a6b7cbd50a" Feb 01 07:41:42 crc kubenswrapper[4650]: I0201 07:41:42.950443 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" podUID="73d8cddc-9598-4160-821f-9f2a594b9eb4" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: i/o timeout" Feb 01 07:41:42 crc 
kubenswrapper[4650]: I0201 07:41:42.951293 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:41:43 crc kubenswrapper[4650]: I0201 07:41:43.203432 4650 generic.go:334] "Generic (PLEG): container finished" podID="16da8114-b11c-449a-8cf7-17c1980cdcf7" containerID="6c4f59f47f499c833fd050a42711bc0cd8616de68ce9a799bc57cf737b8ca09b" exitCode=0 Feb 01 07:41:43 crc kubenswrapper[4650]: I0201 07:41:43.203476 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-5gv78" event={"ID":"16da8114-b11c-449a-8cf7-17c1980cdcf7","Type":"ContainerDied","Data":"6c4f59f47f499c833fd050a42711bc0cd8616de68ce9a799bc57cf737b8ca09b"} Feb 01 07:41:47 crc kubenswrapper[4650]: I0201 07:41:47.951492 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" podUID="73d8cddc-9598-4160-821f-9f2a594b9eb4" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: i/o timeout" Feb 01 07:41:48 crc kubenswrapper[4650]: I0201 07:41:48.270770 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:48 crc kubenswrapper[4650]: I0201 07:41:48.271098 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.199121 4650 scope.go:117] "RemoveContainer" containerID="d2f715a393c524210c739be45a6809ad759184af4ae9bdac9460496004b7b00d" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.300412 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b4d45c6bd-qsdbt" event={"ID":"7e572f25-ea86-45a7-b828-214b813f9d0c","Type":"ContainerStarted","Data":"3991a1f5f7d6ff3bc69e81c8fcf18b986763834ea8fe5c0be7e359e7c44955dc"} Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.308746 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" event={"ID":"73d8cddc-9598-4160-821f-9f2a594b9eb4","Type":"ContainerDied","Data":"4160d2b1eb52f55e5b74178be80f00e55c971baf2b000ff5c2f52adddfd6df0c"} Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.308792 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4160d2b1eb52f55e5b74178be80f00e55c971baf2b000ff5c2f52adddfd6df0c" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.310232 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66d9bfcd5-6jkbm" event={"ID":"46e1952e-615d-42f3-891c-d5a6b7cbd50a","Type":"ContainerDied","Data":"8554b8fcc9bbe19e56bbac95d5f08cbd3e8049f3450e15e2193ca81a7ae83481"} Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.310259 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8554b8fcc9bbe19e56bbac95d5f08cbd3e8049f3450e15e2193ca81a7ae83481" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.311810 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5db79774bc-nc4br" event={"ID":"299a9a2f-8631-455d-8e97-dfc5e8a17734","Type":"ContainerDied","Data":"4ffde89b368c78683d95328b4194957cefea15af052cf56d8f1f4244cf895ec6"} Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.311835 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ffde89b368c78683d95328b4194957cefea15af052cf56d8f1f4244cf895ec6" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.331642 4650 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97945bc6-4707-42b5-ace3-113abc710e3e","Type":"ContainerDied","Data":"2f62a1cce6a20e84531d4aa2737bd09c0fa99cf64e4ceef5a322b1d50e134c0d"} Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.331691 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f62a1cce6a20e84531d4aa2737bd09c0fa99cf64e4ceef5a322b1d50e134c0d" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.333691 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-5gv78" event={"ID":"16da8114-b11c-449a-8cf7-17c1980cdcf7","Type":"ContainerDied","Data":"044d19a61f78006756054e086ba5a0ccb8e22a6cf385124abc58f3c4a587ba30"} Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.333719 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="044d19a61f78006756054e086ba5a0ccb8e22a6cf385124abc58f3c4a587ba30" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.403275 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.424888 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.435885 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.445445 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.475986 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-ovsdbserver-sb\") pod \"73d8cddc-9598-4160-821f-9f2a594b9eb4\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.476085 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-dns-svc\") pod \"73d8cddc-9598-4160-821f-9f2a594b9eb4\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.476133 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-ovsdbserver-nb\") pod \"73d8cddc-9598-4160-821f-9f2a594b9eb4\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.476207 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64w2l\" (UniqueName: \"kubernetes.io/projected/73d8cddc-9598-4160-821f-9f2a594b9eb4-kube-api-access-64w2l\") pod \"73d8cddc-9598-4160-821f-9f2a594b9eb4\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.476254 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-config\") pod \"73d8cddc-9598-4160-821f-9f2a594b9eb4\" (UID: \"73d8cddc-9598-4160-821f-9f2a594b9eb4\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.480822 4650 util.go:48] "No ready sandbox 
for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-5gv78" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.523421 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73d8cddc-9598-4160-821f-9f2a594b9eb4-kube-api-access-64w2l" (OuterVolumeSpecName: "kube-api-access-64w2l") pod "73d8cddc-9598-4160-821f-9f2a594b9eb4" (UID: "73d8cddc-9598-4160-821f-9f2a594b9eb4"). InnerVolumeSpecName "kube-api-access-64w2l". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.558913 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "73d8cddc-9598-4160-821f-9f2a594b9eb4" (UID: "73d8cddc-9598-4160-821f-9f2a594b9eb4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.562435 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "73d8cddc-9598-4160-821f-9f2a594b9eb4" (UID: "73d8cddc-9598-4160-821f-9f2a594b9eb4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.573955 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "73d8cddc-9598-4160-821f-9f2a594b9eb4" (UID: "73d8cddc-9598-4160-821f-9f2a594b9eb4"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577525 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/299a9a2f-8631-455d-8e97-dfc5e8a17734-config-data\") pod \"299a9a2f-8631-455d-8e97-dfc5e8a17734\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577587 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16da8114-b11c-449a-8cf7-17c1980cdcf7-combined-ca-bundle\") pod \"16da8114-b11c-449a-8cf7-17c1980cdcf7\" (UID: \"16da8114-b11c-449a-8cf7-17c1980cdcf7\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577609 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"97945bc6-4707-42b5-ace3-113abc710e3e\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577633 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/16da8114-b11c-449a-8cf7-17c1980cdcf7-config\") pod \"16da8114-b11c-449a-8cf7-17c1980cdcf7\" (UID: \"16da8114-b11c-449a-8cf7-17c1980cdcf7\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577659 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/299a9a2f-8631-455d-8e97-dfc5e8a17734-horizon-secret-key\") pod \"299a9a2f-8631-455d-8e97-dfc5e8a17734\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577676 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/46e1952e-615d-42f3-891c-d5a6b7cbd50a-scripts\") pod \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577689 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/299a9a2f-8631-455d-8e97-dfc5e8a17734-scripts\") pod \"299a9a2f-8631-455d-8e97-dfc5e8a17734\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577722 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/46e1952e-615d-42f3-891c-d5a6b7cbd50a-horizon-secret-key\") pod \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577739 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-internal-tls-certs\") pod \"97945bc6-4707-42b5-ace3-113abc710e3e\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577759 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpmw2\" (UniqueName: \"kubernetes.io/projected/299a9a2f-8631-455d-8e97-dfc5e8a17734-kube-api-access-lpmw2\") pod \"299a9a2f-8631-455d-8e97-dfc5e8a17734\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " Feb 01 
07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577795 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46e1952e-615d-42f3-891c-d5a6b7cbd50a-config-data\") pod \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577820 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55lp9\" (UniqueName: \"kubernetes.io/projected/16da8114-b11c-449a-8cf7-17c1980cdcf7-kube-api-access-55lp9\") pod \"16da8114-b11c-449a-8cf7-17c1980cdcf7\" (UID: \"16da8114-b11c-449a-8cf7-17c1980cdcf7\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577847 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9872m\" (UniqueName: \"kubernetes.io/projected/97945bc6-4707-42b5-ace3-113abc710e3e-kube-api-access-9872m\") pod \"97945bc6-4707-42b5-ace3-113abc710e3e\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.577880 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/299a9a2f-8631-455d-8e97-dfc5e8a17734-logs\") pod \"299a9a2f-8631-455d-8e97-dfc5e8a17734\" (UID: \"299a9a2f-8631-455d-8e97-dfc5e8a17734\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.578489 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/299a9a2f-8631-455d-8e97-dfc5e8a17734-config-data" (OuterVolumeSpecName: "config-data") pod "299a9a2f-8631-455d-8e97-dfc5e8a17734" (UID: "299a9a2f-8631-455d-8e97-dfc5e8a17734"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.579311 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46e1952e-615d-42f3-891c-d5a6b7cbd50a-config-data" (OuterVolumeSpecName: "config-data") pod "46e1952e-615d-42f3-891c-d5a6b7cbd50a" (UID: "46e1952e-615d-42f3-891c-d5a6b7cbd50a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.579833 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46e1952e-615d-42f3-891c-d5a6b7cbd50a-scripts" (OuterVolumeSpecName: "scripts") pod "46e1952e-615d-42f3-891c-d5a6b7cbd50a" (UID: "46e1952e-615d-42f3-891c-d5a6b7cbd50a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.581896 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-config-data\") pod \"97945bc6-4707-42b5-ace3-113abc710e3e\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.581931 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-combined-ca-bundle\") pod \"97945bc6-4707-42b5-ace3-113abc710e3e\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.581963 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/97945bc6-4707-42b5-ace3-113abc710e3e-httpd-run\") pod \"97945bc6-4707-42b5-ace3-113abc710e3e\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.581969 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/299a9a2f-8631-455d-8e97-dfc5e8a17734-logs" (OuterVolumeSpecName: "logs") pod "299a9a2f-8631-455d-8e97-dfc5e8a17734" (UID: "299a9a2f-8631-455d-8e97-dfc5e8a17734"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582001 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxg9h\" (UniqueName: \"kubernetes.io/projected/46e1952e-615d-42f3-891c-d5a6b7cbd50a-kube-api-access-zxg9h\") pod \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582039 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-scripts\") pod \"97945bc6-4707-42b5-ace3-113abc710e3e\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582059 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46e1952e-615d-42f3-891c-d5a6b7cbd50a-logs\") pod \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\" (UID: \"46e1952e-615d-42f3-891c-d5a6b7cbd50a\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582075 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97945bc6-4707-42b5-ace3-113abc710e3e-logs\") pod \"97945bc6-4707-42b5-ace3-113abc710e3e\" (UID: \"97945bc6-4707-42b5-ace3-113abc710e3e\") " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582502 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/299a9a2f-8631-455d-8e97-dfc5e8a17734-scripts" (OuterVolumeSpecName: "scripts") pod "299a9a2f-8631-455d-8e97-dfc5e8a17734" (UID: "299a9a2f-8631-455d-8e97-dfc5e8a17734"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582608 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/299a9a2f-8631-455d-8e97-dfc5e8a17734-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "299a9a2f-8631-455d-8e97-dfc5e8a17734" (UID: "299a9a2f-8631-455d-8e97-dfc5e8a17734"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582654 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582665 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/299a9a2f-8631-455d-8e97-dfc5e8a17734-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582773 4650 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582783 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582792 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64w2l\" (UniqueName: \"kubernetes.io/projected/73d8cddc-9598-4160-821f-9f2a594b9eb4-kube-api-access-64w2l\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582803 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/299a9a2f-8631-455d-8e97-dfc5e8a17734-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582813 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/46e1952e-615d-42f3-891c-d5a6b7cbd50a-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.582858 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97945bc6-4707-42b5-ace3-113abc710e3e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "97945bc6-4707-42b5-ace3-113abc710e3e" (UID: "97945bc6-4707-42b5-ace3-113abc710e3e"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.583107 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/299a9a2f-8631-455d-8e97-dfc5e8a17734-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.583122 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46e1952e-615d-42f3-891c-d5a6b7cbd50a-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.587312 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46e1952e-615d-42f3-891c-d5a6b7cbd50a-logs" (OuterVolumeSpecName: "logs") pod "46e1952e-615d-42f3-891c-d5a6b7cbd50a" (UID: "46e1952e-615d-42f3-891c-d5a6b7cbd50a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.588357 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46e1952e-615d-42f3-891c-d5a6b7cbd50a-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "46e1952e-615d-42f3-891c-d5a6b7cbd50a" (UID: "46e1952e-615d-42f3-891c-d5a6b7cbd50a"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.588912 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-scripts" (OuterVolumeSpecName: "scripts") pod "97945bc6-4707-42b5-ace3-113abc710e3e" (UID: "97945bc6-4707-42b5-ace3-113abc710e3e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.589126 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97945bc6-4707-42b5-ace3-113abc710e3e-logs" (OuterVolumeSpecName: "logs") pod "97945bc6-4707-42b5-ace3-113abc710e3e" (UID: "97945bc6-4707-42b5-ace3-113abc710e3e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.591335 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/299a9a2f-8631-455d-8e97-dfc5e8a17734-kube-api-access-lpmw2" (OuterVolumeSpecName: "kube-api-access-lpmw2") pod "299a9a2f-8631-455d-8e97-dfc5e8a17734" (UID: "299a9a2f-8631-455d-8e97-dfc5e8a17734"). InnerVolumeSpecName "kube-api-access-lpmw2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.591377 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16da8114-b11c-449a-8cf7-17c1980cdcf7-kube-api-access-55lp9" (OuterVolumeSpecName: "kube-api-access-55lp9") pod "16da8114-b11c-449a-8cf7-17c1980cdcf7" (UID: "16da8114-b11c-449a-8cf7-17c1980cdcf7"). InnerVolumeSpecName "kube-api-access-55lp9". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.591672 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "97945bc6-4707-42b5-ace3-113abc710e3e" (UID: "97945bc6-4707-42b5-ace3-113abc710e3e"). InnerVolumeSpecName "local-storage08-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.592793 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97945bc6-4707-42b5-ace3-113abc710e3e-kube-api-access-9872m" (OuterVolumeSpecName: "kube-api-access-9872m") pod "97945bc6-4707-42b5-ace3-113abc710e3e" (UID: "97945bc6-4707-42b5-ace3-113abc710e3e"). InnerVolumeSpecName "kube-api-access-9872m". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.593220 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-config" (OuterVolumeSpecName: "config") pod "73d8cddc-9598-4160-821f-9f2a594b9eb4" (UID: "73d8cddc-9598-4160-821f-9f2a594b9eb4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.593258 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46e1952e-615d-42f3-891c-d5a6b7cbd50a-kube-api-access-zxg9h" (OuterVolumeSpecName: "kube-api-access-zxg9h") pod "46e1952e-615d-42f3-891c-d5a6b7cbd50a" (UID: "46e1952e-615d-42f3-891c-d5a6b7cbd50a"). InnerVolumeSpecName "kube-api-access-zxg9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.619240 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16da8114-b11c-449a-8cf7-17c1980cdcf7-config" (OuterVolumeSpecName: "config") pod "16da8114-b11c-449a-8cf7-17c1980cdcf7" (UID: "16da8114-b11c-449a-8cf7-17c1980cdcf7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.630007 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97945bc6-4707-42b5-ace3-113abc710e3e" (UID: "97945bc6-4707-42b5-ace3-113abc710e3e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.632428 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16da8114-b11c-449a-8cf7-17c1980cdcf7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "16da8114-b11c-449a-8cf7-17c1980cdcf7" (UID: "16da8114-b11c-449a-8cf7-17c1980cdcf7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.632918 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-config-data" (OuterVolumeSpecName: "config-data") pod "97945bc6-4707-42b5-ace3-113abc710e3e" (UID: "97945bc6-4707-42b5-ace3-113abc710e3e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.644565 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "97945bc6-4707-42b5-ace3-113abc710e3e" (UID: "97945bc6-4707-42b5-ace3-113abc710e3e"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684238 4650 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/299a9a2f-8631-455d-8e97-dfc5e8a17734-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684268 4650 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/46e1952e-615d-42f3-891c-d5a6b7cbd50a-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684277 4650 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684286 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpmw2\" (UniqueName: \"kubernetes.io/projected/299a9a2f-8631-455d-8e97-dfc5e8a17734-kube-api-access-lpmw2\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684299 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55lp9\" (UniqueName: \"kubernetes.io/projected/16da8114-b11c-449a-8cf7-17c1980cdcf7-kube-api-access-55lp9\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684310 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9872m\" (UniqueName: \"kubernetes.io/projected/97945bc6-4707-42b5-ace3-113abc710e3e-kube-api-access-9872m\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684318 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684326 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684334 4650 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/97945bc6-4707-42b5-ace3-113abc710e3e-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684342 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxg9h\" (UniqueName: \"kubernetes.io/projected/46e1952e-615d-42f3-891c-d5a6b7cbd50a-kube-api-access-zxg9h\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684350 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97945bc6-4707-42b5-ace3-113abc710e3e-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684359 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46e1952e-615d-42f3-891c-d5a6b7cbd50a-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684368 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97945bc6-4707-42b5-ace3-113abc710e3e-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 
crc kubenswrapper[4650]: I0201 07:41:51.684376 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73d8cddc-9598-4160-821f-9f2a594b9eb4-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684384 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16da8114-b11c-449a-8cf7-17c1980cdcf7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684412 4650 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.684421 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/16da8114-b11c-449a-8cf7-17c1980cdcf7-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.700830 4650 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Feb 01 07:41:51 crc kubenswrapper[4650]: I0201 07:41:51.786375 4650 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.339132 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5db79774bc-nc4br" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.339217 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.339208 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-5gv78" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.339251 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.339157 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-66d9bfcd5-6jkbm" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.382458 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.385073 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.409918 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:52 crc kubenswrapper[4650]: E0201 07:41:52.410320 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d8cddc-9598-4160-821f-9f2a594b9eb4" containerName="dnsmasq-dns" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.410338 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d8cddc-9598-4160-821f-9f2a594b9eb4" containerName="dnsmasq-dns" Feb 01 07:41:52 crc kubenswrapper[4650]: E0201 07:41:52.410358 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97945bc6-4707-42b5-ace3-113abc710e3e" containerName="glance-httpd" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.410365 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="97945bc6-4707-42b5-ace3-113abc710e3e" containerName="glance-httpd" Feb 01 07:41:52 crc kubenswrapper[4650]: E0201 07:41:52.410374 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97945bc6-4707-42b5-ace3-113abc710e3e" containerName="glance-log" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.410380 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="97945bc6-4707-42b5-ace3-113abc710e3e" containerName="glance-log" Feb 01 07:41:52 crc kubenswrapper[4650]: E0201 07:41:52.410399 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16da8114-b11c-449a-8cf7-17c1980cdcf7" containerName="neutron-db-sync" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.410405 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="16da8114-b11c-449a-8cf7-17c1980cdcf7" containerName="neutron-db-sync" Feb 01 07:41:52 crc kubenswrapper[4650]: E0201 07:41:52.410414 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d8cddc-9598-4160-821f-9f2a594b9eb4" containerName="init" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.410419 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d8cddc-9598-4160-821f-9f2a594b9eb4" containerName="init" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.410576 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d8cddc-9598-4160-821f-9f2a594b9eb4" containerName="dnsmasq-dns" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.410588 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="97945bc6-4707-42b5-ace3-113abc710e3e" containerName="glance-log" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.410609 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="16da8114-b11c-449a-8cf7-17c1980cdcf7" containerName="neutron-db-sync" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.410617 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="97945bc6-4707-42b5-ace3-113abc710e3e" containerName="glance-httpd" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.411615 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.420905 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.421128 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.427664 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-hjbmr"] Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.449913 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.462065 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-hjbmr"] Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.487560 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-66d9bfcd5-6jkbm"] Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.495919 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-66d9bfcd5-6jkbm"] Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.520628 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.520695 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/43fb6cca-a9d3-4205-a078-847687c48f0b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.520752 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.520798 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.520833 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.520856 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" 
(UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.520897 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43fb6cca-a9d3-4205-a078-847687c48f0b-logs\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.520930 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kmtm\" (UniqueName: \"kubernetes.io/projected/43fb6cca-a9d3-4205-a078-847687c48f0b-kube-api-access-4kmtm\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.537530 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5db79774bc-nc4br"] Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.543687 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5db79774bc-nc4br"] Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.622224 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/43fb6cca-a9d3-4205-a078-847687c48f0b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.622297 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.622337 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.622363 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.622381 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.622402 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43fb6cca-a9d3-4205-a078-847687c48f0b-logs\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc 
kubenswrapper[4650]: I0201 07:41:52.622446 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kmtm\" (UniqueName: \"kubernetes.io/projected/43fb6cca-a9d3-4205-a078-847687c48f0b-kube-api-access-4kmtm\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.622452 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.622477 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.622706 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/43fb6cca-a9d3-4205-a078-847687c48f0b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.623544 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43fb6cca-a9d3-4205-a078-847687c48f0b-logs\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.631914 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.632263 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.633252 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.642236 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.655901 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-4kmtm\" (UniqueName: \"kubernetes.io/projected/43fb6cca-a9d3-4205-a078-847687c48f0b-kube-api-access-4kmtm\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.686394 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.731923 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.733952 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b6c948c7-clpjc"] Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.735286 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.767204 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b6c948c7-clpjc"] Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.825106 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-ovsdbserver-nb\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.825195 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-ovsdbserver-sb\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.825233 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-config\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.825253 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t84nv\" (UniqueName: \"kubernetes.io/projected/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-kube-api-access-t84nv\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.825280 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-dns-svc\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.893195 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-85ff8d5c86-hrgmh"] Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.894430 4650 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.897177 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-qnnk9" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.897333 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.897476 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.898291 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.902016 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-85ff8d5c86-hrgmh"] Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.927635 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-ovsdbserver-nb\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.927724 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-ovsdbserver-sb\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.927760 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-config\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.927780 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t84nv\" (UniqueName: \"kubernetes.io/projected/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-kube-api-access-t84nv\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.927803 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-dns-svc\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.928667 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-dns-svc\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.929229 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-ovsdbserver-nb\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " 
pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.929717 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-ovsdbserver-sb\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.930222 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-config\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.952483 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-hjbmr" podUID="73d8cddc-9598-4160-821f-9f2a594b9eb4" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: i/o timeout" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.966005 4650 scope.go:117] "RemoveContainer" containerID="6504b725df1bfeb92243f472b8d0d3567369670c116e0b52078bea24ff64fa18" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.966084 4650 scope.go:117] "RemoveContainer" containerID="8c262c59779118f7852a08d8e244dfa36953485fce537dcd86580dc10cd0ba0b" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.966170 4650 scope.go:117] "RemoveContainer" containerID="b10079ec0287249a789ed330b62eed00b2c8d97c8f8b7f0b73fdae340eb33595" Feb 01 07:41:52 crc kubenswrapper[4650]: E0201 07:41:52.966436 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:41:52 crc kubenswrapper[4650]: I0201 07:41:52.969436 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t84nv\" (UniqueName: \"kubernetes.io/projected/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-kube-api-access-t84nv\") pod \"dnsmasq-dns-b6c948c7-clpjc\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.029196 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-httpd-config\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.029276 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-combined-ca-bundle\") pod 
\"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.029318 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-ovndb-tls-certs\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.029346 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwjdk\" (UniqueName: \"kubernetes.io/projected/d8cab23a-57a2-432e-9aa8-1ffc44434d58-kube-api-access-bwjdk\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.029375 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-config\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.081094 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.130634 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-httpd-config\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.130728 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-combined-ca-bundle\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.130790 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-ovndb-tls-certs\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.130837 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwjdk\" (UniqueName: \"kubernetes.io/projected/d8cab23a-57a2-432e-9aa8-1ffc44434d58-kube-api-access-bwjdk\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.130861 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-config\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.135815 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" 
(UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-httpd-config\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.146387 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-combined-ca-bundle\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.146706 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-config\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.148062 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwjdk\" (UniqueName: \"kubernetes.io/projected/d8cab23a-57a2-432e-9aa8-1ffc44434d58-kube-api-access-bwjdk\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.149407 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-ovndb-tls-certs\") pod \"neutron-85ff8d5c86-hrgmh\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.212214 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:53 crc kubenswrapper[4650]: E0201 07:41:53.310675 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Feb 01 07:41:53 crc kubenswrapper[4650]: E0201 07:41:53.310860 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jdsvl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-x99cv_openstack(00154668-79cc-4c4d-81f9-e7975168f700): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:41:53 crc kubenswrapper[4650]: E0201 07:41:53.312002 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-x99cv" podUID="00154668-79cc-4c4d-81f9-e7975168f700" Feb 01 07:41:53 crc kubenswrapper[4650]: E0201 07:41:53.349714 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-x99cv" podUID="00154668-79cc-4c4d-81f9-e7975168f700" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.975964 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="299a9a2f-8631-455d-8e97-dfc5e8a17734" path="/var/lib/kubelet/pods/299a9a2f-8631-455d-8e97-dfc5e8a17734/volumes" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.976668 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46e1952e-615d-42f3-891c-d5a6b7cbd50a" path="/var/lib/kubelet/pods/46e1952e-615d-42f3-891c-d5a6b7cbd50a/volumes" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.977020 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73d8cddc-9598-4160-821f-9f2a594b9eb4" path="/var/lib/kubelet/pods/73d8cddc-9598-4160-821f-9f2a594b9eb4/volumes" Feb 01 07:41:53 crc kubenswrapper[4650]: I0201 07:41:53.977779 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97945bc6-4707-42b5-ace3-113abc710e3e" path="/var/lib/kubelet/pods/97945bc6-4707-42b5-ace3-113abc710e3e/volumes" Feb 01 07:41:54 crc kubenswrapper[4650]: E0201 07:41:54.044445 4650 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Feb 01 07:41:54 crc kubenswrapper[4650]: E0201 07:41:54.044575 4650 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6vr6s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-kzjnq_openstack(2208b1dc-dbac-498a-a760-21257b722e80): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 01 07:41:54 crc kubenswrapper[4650]: E0201 07:41:54.045661 4650 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-kzjnq" podUID="2208b1dc-dbac-498a-a760-21257b722e80" Feb 01 07:41:54 crc kubenswrapper[4650]: I0201 07:41:54.138683 4650 scope.go:117] "RemoveContainer" containerID="b4fe5ae45d159834a9c667093e751e227a564d39ec95f64cc51d8b99cc229ac8" Feb 01 07:41:54 crc kubenswrapper[4650]: E0201 07:41:54.480621 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-kzjnq" podUID="2208b1dc-dbac-498a-a760-21257b722e80" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.030292 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-85ff8d5c86-hrgmh"] Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.057318 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-z4nfj"] Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.128344 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-78c5fb6df7-xcnvd"] Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.129879 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.132809 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-78c5fb6df7-xcnvd"] Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.136827 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.136905 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.210573 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-ovndb-tls-certs\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.210710 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-combined-ca-bundle\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.210743 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-httpd-config\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.210764 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-config\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " 
pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.210790 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-internal-tls-certs\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.210813 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcr6t\" (UniqueName: \"kubernetes.io/projected/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-kube-api-access-hcr6t\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.210909 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-public-tls-certs\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.292799 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-79fd8b5f84-qg9cv"] Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.312163 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-public-tls-certs\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.312247 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-ovndb-tls-certs\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.312330 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-combined-ca-bundle\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.312370 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-httpd-config\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.312391 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-config\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.312411 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-internal-tls-certs\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.312448 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcr6t\" (UniqueName: \"kubernetes.io/projected/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-kube-api-access-hcr6t\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.320362 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-httpd-config\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.321393 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-public-tls-certs\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.323930 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-combined-ca-bundle\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.327492 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-ovndb-tls-certs\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.328891 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-config\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.329443 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-internal-tls-certs\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.353719 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcr6t\" (UniqueName: \"kubernetes.io/projected/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-kube-api-access-hcr6t\") pod \"neutron-78c5fb6df7-xcnvd\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.377211 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b6c948c7-clpjc"] Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.453971 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.457531 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.494192 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a569cd99-6b07-46d7-b2c9-ef80aa27976e","Type":"ContainerStarted","Data":"9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597"} Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.494927 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a569cd99-6b07-46d7-b2c9-ef80aa27976e" containerName="glance-log" containerID="cri-o://530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e" gracePeriod=30 Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.495328 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a569cd99-6b07-46d7-b2c9-ef80aa27976e" containerName="glance-httpd" containerID="cri-o://9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597" gracePeriod=30 Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.528842 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-d4zk4" event={"ID":"9d6a29ee-be36-4454-bf92-6dfffd45687b","Type":"ContainerStarted","Data":"23a50bfc95a722d92ac978351b6831fe4a48bd989557bce3777ce7609251cbe2"} Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.538715 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=33.538685507 podStartE2EDuration="33.538685507s" podCreationTimestamp="2026-02-01 07:41:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:55.521798457 +0000 UTC m=+1114.244896702" watchObservedRunningTime="2026-02-01 07:41:55.538685507 +0000 UTC m=+1114.261783752" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.549226 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-695d6f76c-qccxs" event={"ID":"f3f97afc-40d4-4fc4-be00-1280202c0a31","Type":"ContainerStarted","Data":"112391bf4fb6c72a11e97b41c4000f17d32e1ef908ec337d0696c0c7f970a9ce"} Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.549265 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-695d6f76c-qccxs" event={"ID":"f3f97afc-40d4-4fc4-be00-1280202c0a31","Type":"ContainerStarted","Data":"92e7d65631ca2cdf85d94e108c984e0abaad1a4071ab0be5c98f694619635f10"} Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.549381 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-695d6f76c-qccxs" podUID="f3f97afc-40d4-4fc4-be00-1280202c0a31" containerName="horizon-log" containerID="cri-o://92e7d65631ca2cdf85d94e108c984e0abaad1a4071ab0be5c98f694619635f10" gracePeriod=30 Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.549601 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-695d6f76c-qccxs" podUID="f3f97afc-40d4-4fc4-be00-1280202c0a31" containerName="horizon" containerID="cri-o://112391bf4fb6c72a11e97b41c4000f17d32e1ef908ec337d0696c0c7f970a9ce" gracePeriod=30 Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.555660 4650 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b4d45c6bd-qsdbt" event={"ID":"7e572f25-ea86-45a7-b828-214b813f9d0c","Type":"ContainerStarted","Data":"038fc80dfb9fd47b73607b6e75c77545e7d8c10ea25cbba2f578bdb2c48b96af"} Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.555682 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b4d45c6bd-qsdbt" event={"ID":"7e572f25-ea86-45a7-b828-214b813f9d0c","Type":"ContainerStarted","Data":"121bf0c27d3cd2492d3454ae6a47181d459961964a1aefdd883d489176849870"} Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.560179 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79fd8b5f84-qg9cv" event={"ID":"9c4bad14-279f-4212-a86d-cea1c9fe7b48","Type":"ContainerStarted","Data":"d7ee8efb2f84ce27439bf6b9cbb46c36307bf2818654d03385026864d7df9476"} Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.568285 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-d4zk4" podStartSLOduration=3.654027002 podStartE2EDuration="41.568268868s" podCreationTimestamp="2026-02-01 07:41:14 +0000 UTC" firstStartedPulling="2026-02-01 07:41:16.570917062 +0000 UTC m=+1075.294015307" lastFinishedPulling="2026-02-01 07:41:54.485158928 +0000 UTC m=+1113.208257173" observedRunningTime="2026-02-01 07:41:55.565714662 +0000 UTC m=+1114.288812917" watchObservedRunningTime="2026-02-01 07:41:55.568268868 +0000 UTC m=+1114.291367113" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.571510 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-z4nfj" event={"ID":"b0b99236-33b9-4191-8139-8afbda8a3329","Type":"ContainerStarted","Data":"e4f022ddfdbdf21edede9cc3d45f026e5cfa5544faac89746b88c86202777a01"} Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.579123 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"43fb6cca-a9d3-4205-a078-847687c48f0b","Type":"ContainerStarted","Data":"6bc366f856ccc93b09d1ea04c34fe8a683e2d347f24c316be3e841dc887be5b4"} Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.597655 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5b4d45c6bd-qsdbt" podStartSLOduration=32.597631644 podStartE2EDuration="32.597631644s" podCreationTimestamp="2026-02-01 07:41:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:55.591430922 +0000 UTC m=+1114.314529167" watchObservedRunningTime="2026-02-01 07:41:55.597631644 +0000 UTC m=+1114.320729889" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.611312 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85ff8d5c86-hrgmh" event={"ID":"d8cab23a-57a2-432e-9aa8-1ffc44434d58","Type":"ContainerStarted","Data":"eab2f92a1e1098a97d366301d4866d1822f89b35f7a7301a8d37606efcc8d925"} Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.621090 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-695d6f76c-qccxs" podStartSLOduration=8.065239933 podStartE2EDuration="42.621073885s" podCreationTimestamp="2026-02-01 07:41:13 +0000 UTC" firstStartedPulling="2026-02-01 07:41:16.698474535 +0000 UTC m=+1075.421572780" lastFinishedPulling="2026-02-01 07:41:51.254308477 +0000 UTC m=+1109.977406732" observedRunningTime="2026-02-01 07:41:55.613034936 +0000 UTC m=+1114.336133191" 
watchObservedRunningTime="2026-02-01 07:41:55.621073885 +0000 UTC m=+1114.344172130" Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.655509 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b6c948c7-clpjc" event={"ID":"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94","Type":"ContainerStarted","Data":"aaa4b5b0660cbc9a9b21e6e02c19f49b54bc99b51f3ba2c146d4b31b70bb6040"} Feb 01 07:41:55 crc kubenswrapper[4650]: I0201 07:41:55.689317 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b243d67e-b432-4b66-aa65-05cdbc100cb7","Type":"ContainerStarted","Data":"c727b8fe9386e32e0f907f7dd32d6dc2f2f2b3f3323c8ca2015c7aa290fbab08"} Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.236081 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-78c5fb6df7-xcnvd"] Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.560204 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.658724 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-config-data\") pod \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.658770 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-public-tls-certs\") pod \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.658830 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-scripts\") pod \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.658873 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9glhx\" (UniqueName: \"kubernetes.io/projected/a569cd99-6b07-46d7-b2c9-ef80aa27976e-kube-api-access-9glhx\") pod \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.658903 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a569cd99-6b07-46d7-b2c9-ef80aa27976e-httpd-run\") pod \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.658938 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.658957 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a569cd99-6b07-46d7-b2c9-ef80aa27976e-logs\") pod \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.659053 4650 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-combined-ca-bundle\") pod \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\" (UID: \"a569cd99-6b07-46d7-b2c9-ef80aa27976e\") " Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.662410 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a569cd99-6b07-46d7-b2c9-ef80aa27976e-logs" (OuterVolumeSpecName: "logs") pod "a569cd99-6b07-46d7-b2c9-ef80aa27976e" (UID: "a569cd99-6b07-46d7-b2c9-ef80aa27976e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.665441 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a569cd99-6b07-46d7-b2c9-ef80aa27976e-kube-api-access-9glhx" (OuterVolumeSpecName: "kube-api-access-9glhx") pod "a569cd99-6b07-46d7-b2c9-ef80aa27976e" (UID: "a569cd99-6b07-46d7-b2c9-ef80aa27976e"). InnerVolumeSpecName "kube-api-access-9glhx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.667329 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a569cd99-6b07-46d7-b2c9-ef80aa27976e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a569cd99-6b07-46d7-b2c9-ef80aa27976e" (UID: "a569cd99-6b07-46d7-b2c9-ef80aa27976e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.685213 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-scripts" (OuterVolumeSpecName: "scripts") pod "a569cd99-6b07-46d7-b2c9-ef80aa27976e" (UID: "a569cd99-6b07-46d7-b2c9-ef80aa27976e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.686597 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "a569cd99-6b07-46d7-b2c9-ef80aa27976e" (UID: "a569cd99-6b07-46d7-b2c9-ef80aa27976e"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.745145 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a569cd99-6b07-46d7-b2c9-ef80aa27976e" (UID: "a569cd99-6b07-46d7-b2c9-ef80aa27976e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.750291 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-z4nfj" event={"ID":"b0b99236-33b9-4191-8139-8afbda8a3329","Type":"ContainerStarted","Data":"99550a12eafed3327c442223efb94cccfd671910766cf2c64f63138a238ccfb4"} Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.761467 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.761488 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9glhx\" (UniqueName: \"kubernetes.io/projected/a569cd99-6b07-46d7-b2c9-ef80aa27976e-kube-api-access-9glhx\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.761500 4650 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a569cd99-6b07-46d7-b2c9-ef80aa27976e-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.761520 4650 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.761530 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a569cd99-6b07-46d7-b2c9-ef80aa27976e-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.761538 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.768607 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78c5fb6df7-xcnvd" event={"ID":"7bcbe498-d2bb-4ad5-87dd-f2896380acfe","Type":"ContainerStarted","Data":"cb2f8af6d7f431c3f8fcdccb2f2945cbb5fed4481dbae22bd46f8b30ef76af31"} Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.781681 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85ff8d5c86-hrgmh" event={"ID":"d8cab23a-57a2-432e-9aa8-1ffc44434d58","Type":"ContainerStarted","Data":"d8816352790cba7d5686ac0765007920429b950912d7a24e2d6cf3eabb7bc5db"} Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.781718 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85ff8d5c86-hrgmh" event={"ID":"d8cab23a-57a2-432e-9aa8-1ffc44434d58","Type":"ContainerStarted","Data":"d3b2ef07e4b18ed02881615aefe34055c60d3300d0846ea348409e2dfae6c9ae"} Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.781855 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.783921 4650 generic.go:334] "Generic (PLEG): container finished" podID="1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" containerID="07ad40af5d9300bc4229c0e95af4124aa8bd3c4fa7b046b90f5e8e213bf8f783" exitCode=0 Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.783971 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b6c948c7-clpjc" 
event={"ID":"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94","Type":"ContainerDied","Data":"07ad40af5d9300bc4229c0e95af4124aa8bd3c4fa7b046b90f5e8e213bf8f783"} Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.795105 4650 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.800743 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-z4nfj" podStartSLOduration=26.800726743 podStartE2EDuration="26.800726743s" podCreationTimestamp="2026-02-01 07:41:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:56.798602517 +0000 UTC m=+1115.521700762" watchObservedRunningTime="2026-02-01 07:41:56.800726743 +0000 UTC m=+1115.523824988" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.802892 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79fd8b5f84-qg9cv" event={"ID":"9c4bad14-279f-4212-a86d-cea1c9fe7b48","Type":"ContainerStarted","Data":"e3cebc6d6781572d06c66f70f152f9956e68027b5900448693f2f7b809a6fd77"} Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.802918 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79fd8b5f84-qg9cv" event={"ID":"9c4bad14-279f-4212-a86d-cea1c9fe7b48","Type":"ContainerStarted","Data":"db2eab233a712cba15c694e67b3d6f581706dba9f974712d96d7bc876e4f3dfd"} Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.808824 4650 generic.go:334] "Generic (PLEG): container finished" podID="a569cd99-6b07-46d7-b2c9-ef80aa27976e" containerID="9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597" exitCode=143 Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.808871 4650 generic.go:334] "Generic (PLEG): container finished" podID="a569cd99-6b07-46d7-b2c9-ef80aa27976e" containerID="530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e" exitCode=143 Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.808892 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.808970 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a569cd99-6b07-46d7-b2c9-ef80aa27976e","Type":"ContainerDied","Data":"9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597"} Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.809002 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a569cd99-6b07-46d7-b2c9-ef80aa27976e","Type":"ContainerDied","Data":"530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e"} Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.809013 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a569cd99-6b07-46d7-b2c9-ef80aa27976e","Type":"ContainerDied","Data":"76195e72f53686f389803c79ef4985ffbc5f9354f481285e90c4c663f17e726e"} Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.809517 4650 scope.go:117] "RemoveContainer" containerID="9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.815667 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a569cd99-6b07-46d7-b2c9-ef80aa27976e" (UID: "a569cd99-6b07-46d7-b2c9-ef80aa27976e"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.856446 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-config-data" (OuterVolumeSpecName: "config-data") pod "a569cd99-6b07-46d7-b2c9-ef80aa27976e" (UID: "a569cd99-6b07-46d7-b2c9-ef80aa27976e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.856866 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-85ff8d5c86-hrgmh" podStartSLOduration=4.856850566 podStartE2EDuration="4.856850566s" podCreationTimestamp="2026-02-01 07:41:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:56.849216247 +0000 UTC m=+1115.572314502" watchObservedRunningTime="2026-02-01 07:41:56.856850566 +0000 UTC m=+1115.579948811" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.864088 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.864115 4650 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a569cd99-6b07-46d7-b2c9-ef80aa27976e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.864126 4650 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.866946 4650 scope.go:117] "RemoveContainer" containerID="530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.906553 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-79fd8b5f84-qg9cv" podStartSLOduration=33.906537081 podStartE2EDuration="33.906537081s" podCreationTimestamp="2026-02-01 07:41:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:56.904446387 +0000 UTC m=+1115.627544642" watchObservedRunningTime="2026-02-01 07:41:56.906537081 +0000 UTC m=+1115.629635346" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.938200 4650 scope.go:117] "RemoveContainer" containerID="9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597" Feb 01 07:41:56 crc kubenswrapper[4650]: E0201 07:41:56.943186 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597\": container with ID starting with 9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597 not found: ID does not exist" containerID="9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.943236 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597"} err="failed to get container status \"9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597\": rpc error: code = NotFound desc = could not find container \"9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597\": container with ID starting with 9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597 not found: ID does not exist" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.943262 4650 scope.go:117] "RemoveContainer" containerID="530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e" 
Feb 01 07:41:56 crc kubenswrapper[4650]: E0201 07:41:56.950230 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e\": container with ID starting with 530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e not found: ID does not exist" containerID="530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.950274 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e"} err="failed to get container status \"530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e\": rpc error: code = NotFound desc = could not find container \"530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e\": container with ID starting with 530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e not found: ID does not exist" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.950302 4650 scope.go:117] "RemoveContainer" containerID="9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.951224 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597"} err="failed to get container status \"9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597\": rpc error: code = NotFound desc = could not find container \"9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597\": container with ID starting with 9acbb2300cac1b9c5464b0007fd4e2bda5d0295aa2393772efb9deb5308cd597 not found: ID does not exist" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.951266 4650 scope.go:117] "RemoveContainer" containerID="530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e" Feb 01 07:41:56 crc kubenswrapper[4650]: I0201 07:41:56.953811 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e"} err="failed to get container status \"530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e\": rpc error: code = NotFound desc = could not find container \"530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e\": container with ID starting with 530097e690790a94f1ac08593a223df0d892c0171b460d078e0a0fc1300a335e not found: ID does not exist" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.158302 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.174129 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.183330 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:57 crc kubenswrapper[4650]: E0201 07:41:57.188331 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a569cd99-6b07-46d7-b2c9-ef80aa27976e" containerName="glance-httpd" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.188363 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a569cd99-6b07-46d7-b2c9-ef80aa27976e" containerName="glance-httpd" Feb 01 07:41:57 crc kubenswrapper[4650]: E0201 07:41:57.188390 4650 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a569cd99-6b07-46d7-b2c9-ef80aa27976e" containerName="glance-log" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.188396 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a569cd99-6b07-46d7-b2c9-ef80aa27976e" containerName="glance-log" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.188642 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="a569cd99-6b07-46d7-b2c9-ef80aa27976e" containerName="glance-log" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.188670 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="a569cd99-6b07-46d7-b2c9-ef80aa27976e" containerName="glance-httpd" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.189843 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.194257 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.194403 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.217727 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.372328 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-config-data\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.372379 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.372409 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.372462 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-logs\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.372488 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.372510 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdb9p\" (UniqueName: \"kubernetes.io/projected/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-kube-api-access-cdb9p\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.372542 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-scripts\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.372569 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.473973 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.474379 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-config-data\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.474410 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.474437 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.474491 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-logs\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.474516 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.474536 4650 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-cdb9p\" (UniqueName: \"kubernetes.io/projected/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-kube-api-access-cdb9p\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.474564 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-scripts\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.475558 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.478328 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-logs\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.478655 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.479752 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.483825 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.487222 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-config-data\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.516619 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-scripts\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.516906 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdb9p\" (UniqueName: 
\"kubernetes.io/projected/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-kube-api-access-cdb9p\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.526381 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.828103 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.856453 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"43fb6cca-a9d3-4205-a078-847687c48f0b","Type":"ContainerStarted","Data":"89293ad56417f795db16526d2d0b08092a089820e3d86259cd257e147d648f24"} Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.862202 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78c5fb6df7-xcnvd" event={"ID":"7bcbe498-d2bb-4ad5-87dd-f2896380acfe","Type":"ContainerStarted","Data":"2d7be25a36bfba8dcfd682fcb26b455bbe9583c5afec760041cc8bcb3f5e8526"} Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.862235 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78c5fb6df7-xcnvd" event={"ID":"7bcbe498-d2bb-4ad5-87dd-f2896380acfe","Type":"ContainerStarted","Data":"cca07a3ed44421c1c2ddae2db88b3fa3f088622070c2075bd1008da4c9b836d4"} Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.862687 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.869546 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-85ff8d5c86-hrgmh_d8cab23a-57a2-432e-9aa8-1ffc44434d58/neutron-httpd/0.log" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.870532 4650 generic.go:334] "Generic (PLEG): container finished" podID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerID="d8816352790cba7d5686ac0765007920429b950912d7a24e2d6cf3eabb7bc5db" exitCode=1 Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.870597 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85ff8d5c86-hrgmh" event={"ID":"d8cab23a-57a2-432e-9aa8-1ffc44434d58","Type":"ContainerDied","Data":"d8816352790cba7d5686ac0765007920429b950912d7a24e2d6cf3eabb7bc5db"} Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.871164 4650 scope.go:117] "RemoveContainer" containerID="d8816352790cba7d5686ac0765007920429b950912d7a24e2d6cf3eabb7bc5db" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.873620 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b6c948c7-clpjc" event={"ID":"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94","Type":"ContainerStarted","Data":"2e28eb2efeefda2c6ebc232204aea9cb0de3fc1044bde8103db5ef199d1920c4"} Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.874061 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.904988 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-78c5fb6df7-xcnvd" podStartSLOduration=2.904971175 
podStartE2EDuration="2.904971175s" podCreationTimestamp="2026-02-01 07:41:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:57.878002272 +0000 UTC m=+1116.601100507" watchObservedRunningTime="2026-02-01 07:41:57.904971175 +0000 UTC m=+1116.628069420" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.957176 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b6c948c7-clpjc" podStartSLOduration=5.957153136 podStartE2EDuration="5.957153136s" podCreationTimestamp="2026-02-01 07:41:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:57.933684554 +0000 UTC m=+1116.656782799" watchObservedRunningTime="2026-02-01 07:41:57.957153136 +0000 UTC m=+1116.680251381" Feb 01 07:41:57 crc kubenswrapper[4650]: I0201 07:41:57.975464 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a569cd99-6b07-46d7-b2c9-ef80aa27976e" path="/var/lib/kubelet/pods/a569cd99-6b07-46d7-b2c9-ef80aa27976e/volumes" Feb 01 07:41:58 crc kubenswrapper[4650]: I0201 07:41:58.932092 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"43fb6cca-a9d3-4205-a078-847687c48f0b","Type":"ContainerStarted","Data":"27395eb5f1f557a4e95895903824ddd90c21115120a26935852a05dd88bbb8bd"} Feb 01 07:41:58 crc kubenswrapper[4650]: I0201 07:41:58.966160 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.966143693 podStartE2EDuration="6.966143693s" podCreationTimestamp="2026-02-01 07:41:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:41:58.961280976 +0000 UTC m=+1117.684379221" watchObservedRunningTime="2026-02-01 07:41:58.966143693 +0000 UTC m=+1117.689241938" Feb 01 07:41:59 crc kubenswrapper[4650]: W0201 07:41:59.521732 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5ae063fc_da05_4f12_96aa_ea13d37dc9d0.slice/crio-186b99d8c97882526f9d64a391f47b3f338f5986b25474a9b476e640de434b3d WatchSource:0}: Error finding container 186b99d8c97882526f9d64a391f47b3f338f5986b25474a9b476e640de434b3d: Status 404 returned error can't find the container with id 186b99d8c97882526f9d64a391f47b3f338f5986b25474a9b476e640de434b3d Feb 01 07:41:59 crc kubenswrapper[4650]: I0201 07:41:59.535615 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:41:59 crc kubenswrapper[4650]: I0201 07:41:59.946885 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b243d67e-b432-4b66-aa65-05cdbc100cb7","Type":"ContainerStarted","Data":"e8583e40abe46d8cdc8a77461093489f5ae34b53d356d113956d6178480920dc"} Feb 01 07:41:59 crc kubenswrapper[4650]: E0201 07:41:59.949210 4650 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8cab23a_57a2_432e_9aa8_1ffc44434d58.slice/crio-conmon-6f8c8dc1eda654014f9579a906ac62edf2663c5e2ac13f6a093be99a40d08ffd.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8cab23a_57a2_432e_9aa8_1ffc44434d58.slice/crio-6f8c8dc1eda654014f9579a906ac62edf2663c5e2ac13f6a093be99a40d08ffd.scope\": RecentStats: unable to find data in memory cache]" Feb 01 07:41:59 crc kubenswrapper[4650]: I0201 07:41:59.950269 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5ae063fc-da05-4f12-96aa-ea13d37dc9d0","Type":"ContainerStarted","Data":"186b99d8c97882526f9d64a391f47b3f338f5986b25474a9b476e640de434b3d"} Feb 01 07:41:59 crc kubenswrapper[4650]: I0201 07:41:59.954202 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-85ff8d5c86-hrgmh_d8cab23a-57a2-432e-9aa8-1ffc44434d58/neutron-httpd/1.log" Feb 01 07:41:59 crc kubenswrapper[4650]: I0201 07:41:59.955857 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-85ff8d5c86-hrgmh_d8cab23a-57a2-432e-9aa8-1ffc44434d58/neutron-httpd/0.log" Feb 01 07:41:59 crc kubenswrapper[4650]: I0201 07:41:59.956343 4650 generic.go:334] "Generic (PLEG): container finished" podID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerID="6f8c8dc1eda654014f9579a906ac62edf2663c5e2ac13f6a093be99a40d08ffd" exitCode=1 Feb 01 07:41:59 crc kubenswrapper[4650]: I0201 07:41:59.956643 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85ff8d5c86-hrgmh" event={"ID":"d8cab23a-57a2-432e-9aa8-1ffc44434d58","Type":"ContainerDied","Data":"6f8c8dc1eda654014f9579a906ac62edf2663c5e2ac13f6a093be99a40d08ffd"} Feb 01 07:41:59 crc kubenswrapper[4650]: I0201 07:41:59.956732 4650 scope.go:117] "RemoveContainer" containerID="d8816352790cba7d5686ac0765007920429b950912d7a24e2d6cf3eabb7bc5db" Feb 01 07:41:59 crc kubenswrapper[4650]: I0201 07:41:59.957406 4650 scope.go:117] "RemoveContainer" containerID="6f8c8dc1eda654014f9579a906ac62edf2663c5e2ac13f6a093be99a40d08ffd" Feb 01 07:41:59 crc kubenswrapper[4650]: E0201 07:41:59.957739 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"neutron-httpd\" with CrashLoopBackOff: \"back-off 10s restarting failed container=neutron-httpd pod=neutron-85ff8d5c86-hrgmh_openstack(d8cab23a-57a2-432e-9aa8-1ffc44434d58)\"" pod="openstack/neutron-85ff8d5c86-hrgmh" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" Feb 01 07:42:00 crc kubenswrapper[4650]: I0201 07:42:00.994243 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-85ff8d5c86-hrgmh_d8cab23a-57a2-432e-9aa8-1ffc44434d58/neutron-httpd/1.log" Feb 01 07:42:01 crc kubenswrapper[4650]: I0201 07:42:00.998138 4650 scope.go:117] "RemoveContainer" containerID="6f8c8dc1eda654014f9579a906ac62edf2663c5e2ac13f6a093be99a40d08ffd" Feb 01 07:42:01 crc kubenswrapper[4650]: E0201 07:42:00.998312 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"neutron-httpd\" with CrashLoopBackOff: \"back-off 10s restarting failed container=neutron-httpd pod=neutron-85ff8d5c86-hrgmh_openstack(d8cab23a-57a2-432e-9aa8-1ffc44434d58)\"" pod="openstack/neutron-85ff8d5c86-hrgmh" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" Feb 01 07:42:01 crc kubenswrapper[4650]: I0201 07:42:01.004868 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5ae063fc-da05-4f12-96aa-ea13d37dc9d0","Type":"ContainerStarted","Data":"1c935daf8b1d75b92fd142680e79786dd3107c04ab2cc97709cc266abfbf30ee"} Feb 01 07:42:02 crc kubenswrapper[4650]: I0201 07:42:02.043498 4650 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5ae063fc-da05-4f12-96aa-ea13d37dc9d0","Type":"ContainerStarted","Data":"a4fa062d160c18a5a3f6edd669ffb4eb1188180e2527d0ca9eb14320da88a755"} Feb 01 07:42:02 crc kubenswrapper[4650]: I0201 07:42:02.058413 4650 generic.go:334] "Generic (PLEG): container finished" podID="9d6a29ee-be36-4454-bf92-6dfffd45687b" containerID="23a50bfc95a722d92ac978351b6831fe4a48bd989557bce3777ce7609251cbe2" exitCode=0 Feb 01 07:42:02 crc kubenswrapper[4650]: I0201 07:42:02.058570 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-d4zk4" event={"ID":"9d6a29ee-be36-4454-bf92-6dfffd45687b","Type":"ContainerDied","Data":"23a50bfc95a722d92ac978351b6831fe4a48bd989557bce3777ce7609251cbe2"} Feb 01 07:42:02 crc kubenswrapper[4650]: I0201 07:42:02.068395 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.068370901 podStartE2EDuration="5.068370901s" podCreationTimestamp="2026-02-01 07:41:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:42:02.065112226 +0000 UTC m=+1120.788210481" watchObservedRunningTime="2026-02-01 07:42:02.068370901 +0000 UTC m=+1120.791469146" Feb 01 07:42:02 crc kubenswrapper[4650]: I0201 07:42:02.732507 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 01 07:42:02 crc kubenswrapper[4650]: I0201 07:42:02.732780 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 01 07:42:02 crc kubenswrapper[4650]: I0201 07:42:02.831221 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 01 07:42:02 crc kubenswrapper[4650]: I0201 07:42:02.898303 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.076418 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.076500 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.083144 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.153954 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-84j7d"] Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.154191 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56798b757f-84j7d" podUID="dfb53357-604b-407e-8577-36288efeda68" containerName="dnsmasq-dns" containerID="cri-o://fe03c09d32473e68729d1713a4e64b958a1db007ebdfe252ecc081294bbbf6b0" gracePeriod=10 Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.664445 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-d4zk4" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.746839 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-combined-ca-bundle\") pod \"9d6a29ee-be36-4454-bf92-6dfffd45687b\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.746927 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d6a29ee-be36-4454-bf92-6dfffd45687b-logs\") pod \"9d6a29ee-be36-4454-bf92-6dfffd45687b\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.747016 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-config-data\") pod \"9d6a29ee-be36-4454-bf92-6dfffd45687b\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.747353 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-scripts\") pod \"9d6a29ee-be36-4454-bf92-6dfffd45687b\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.747428 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76h48\" (UniqueName: \"kubernetes.io/projected/9d6a29ee-be36-4454-bf92-6dfffd45687b-kube-api-access-76h48\") pod \"9d6a29ee-be36-4454-bf92-6dfffd45687b\" (UID: \"9d6a29ee-be36-4454-bf92-6dfffd45687b\") " Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.751681 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d6a29ee-be36-4454-bf92-6dfffd45687b-logs" (OuterVolumeSpecName: "logs") pod "9d6a29ee-be36-4454-bf92-6dfffd45687b" (UID: "9d6a29ee-be36-4454-bf92-6dfffd45687b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.798082 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d6a29ee-be36-4454-bf92-6dfffd45687b-kube-api-access-76h48" (OuterVolumeSpecName: "kube-api-access-76h48") pod "9d6a29ee-be36-4454-bf92-6dfffd45687b" (UID: "9d6a29ee-be36-4454-bf92-6dfffd45687b"). InnerVolumeSpecName "kube-api-access-76h48". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.802235 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-scripts" (OuterVolumeSpecName: "scripts") pod "9d6a29ee-be36-4454-bf92-6dfffd45687b" (UID: "9d6a29ee-be36-4454-bf92-6dfffd45687b"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.857618 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.861216 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76h48\" (UniqueName: \"kubernetes.io/projected/9d6a29ee-be36-4454-bf92-6dfffd45687b-kube-api-access-76h48\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.861239 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d6a29ee-be36-4454-bf92-6dfffd45687b-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.868006 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-config-data" (OuterVolumeSpecName: "config-data") pod "9d6a29ee-be36-4454-bf92-6dfffd45687b" (UID: "9d6a29ee-be36-4454-bf92-6dfffd45687b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.874709 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d6a29ee-be36-4454-bf92-6dfffd45687b" (UID: "9d6a29ee-be36-4454-bf92-6dfffd45687b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.926035 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.964109 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:03 crc kubenswrapper[4650]: I0201 07:42:03.964139 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d6a29ee-be36-4454-bf92-6dfffd45687b-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.017156 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.018269 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.065359 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-config\") pod \"dfb53357-604b-407e-8577-36288efeda68\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.065403 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-ovsdbserver-sb\") pod \"dfb53357-604b-407e-8577-36288efeda68\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.065421 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmtqp\" (UniqueName: \"kubernetes.io/projected/dfb53357-604b-407e-8577-36288efeda68-kube-api-access-jmtqp\") pod \"dfb53357-604b-407e-8577-36288efeda68\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.065449 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-dns-svc\") pod \"dfb53357-604b-407e-8577-36288efeda68\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.065494 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-ovsdbserver-nb\") pod \"dfb53357-604b-407e-8577-36288efeda68\" (UID: \"dfb53357-604b-407e-8577-36288efeda68\") " Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.073986 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfb53357-604b-407e-8577-36288efeda68-kube-api-access-jmtqp" (OuterVolumeSpecName: "kube-api-access-jmtqp") pod "dfb53357-604b-407e-8577-36288efeda68" (UID: "dfb53357-604b-407e-8577-36288efeda68"). InnerVolumeSpecName "kube-api-access-jmtqp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.102429 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-d4zk4" event={"ID":"9d6a29ee-be36-4454-bf92-6dfffd45687b","Type":"ContainerDied","Data":"1105e710fca54cc120195610252154eff4e55e46ce3e0055cd8090cb6a4e1bbf"} Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.102468 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1105e710fca54cc120195610252154eff4e55e46ce3e0055cd8090cb6a4e1bbf" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.102521 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-d4zk4" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.124805 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dfb53357-604b-407e-8577-36288efeda68" (UID: "dfb53357-604b-407e-8577-36288efeda68"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.135923 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.135970 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.138327 4650 generic.go:334] "Generic (PLEG): container finished" podID="dfb53357-604b-407e-8577-36288efeda68" containerID="fe03c09d32473e68729d1713a4e64b958a1db007ebdfe252ecc081294bbbf6b0" exitCode=0 Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.139243 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56798b757f-84j7d" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.139610 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-84j7d" event={"ID":"dfb53357-604b-407e-8577-36288efeda68","Type":"ContainerDied","Data":"fe03c09d32473e68729d1713a4e64b958a1db007ebdfe252ecc081294bbbf6b0"} Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.139639 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-84j7d" event={"ID":"dfb53357-604b-407e-8577-36288efeda68","Type":"ContainerDied","Data":"7561426deccf293624b1ba32a6098facf72a6a7c9a2a449385e151c05a5e34ac"} Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.139655 4650 scope.go:117] "RemoveContainer" containerID="fe03c09d32473e68729d1713a4e64b958a1db007ebdfe252ecc081294bbbf6b0" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.147446 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dfb53357-604b-407e-8577-36288efeda68" (UID: "dfb53357-604b-407e-8577-36288efeda68"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.168218 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.168251 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmtqp\" (UniqueName: \"kubernetes.io/projected/dfb53357-604b-407e-8577-36288efeda68-kube-api-access-jmtqp\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.168262 4650 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.172488 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-config" (OuterVolumeSpecName: "config") pod "dfb53357-604b-407e-8577-36288efeda68" (UID: "dfb53357-604b-407e-8577-36288efeda68"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.180957 4650 scope.go:117] "RemoveContainer" containerID="f57ce8578ac28782d75e21e796d1f228046a9fe8f81dc9ea69507661b03c5a1d" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.211864 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dfb53357-604b-407e-8577-36288efeda68" (UID: "dfb53357-604b-407e-8577-36288efeda68"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.226142 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.269510 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.269542 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dfb53357-604b-407e-8577-36288efeda68-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.314499 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-87f9c5788-4s9lh"] Feb 01 07:42:04 crc kubenswrapper[4650]: E0201 07:42:04.314840 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfb53357-604b-407e-8577-36288efeda68" containerName="init" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.314855 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfb53357-604b-407e-8577-36288efeda68" containerName="init" Feb 01 07:42:04 crc kubenswrapper[4650]: E0201 07:42:04.314867 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfb53357-604b-407e-8577-36288efeda68" containerName="dnsmasq-dns" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.314874 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfb53357-604b-407e-8577-36288efeda68" containerName="dnsmasq-dns" Feb 01 07:42:04 crc kubenswrapper[4650]: E0201 07:42:04.314889 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d6a29ee-be36-4454-bf92-6dfffd45687b" containerName="placement-db-sync" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.314896 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d6a29ee-be36-4454-bf92-6dfffd45687b" containerName="placement-db-sync" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.315159 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfb53357-604b-407e-8577-36288efeda68" containerName="dnsmasq-dns" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.315185 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d6a29ee-be36-4454-bf92-6dfffd45687b" containerName="placement-db-sync" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.316002 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.320924 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.321153 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.321275 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.321482 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.321674 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-dd86b" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.340724 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-87f9c5788-4s9lh"] Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.371825 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-combined-ca-bundle\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.371912 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae091b31-cc44-44f4-a374-6373c9501292-logs\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.371939 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-scripts\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.371968 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-internal-tls-certs\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.372016 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbblt\" (UniqueName: \"kubernetes.io/projected/ae091b31-cc44-44f4-a374-6373c9501292-kube-api-access-cbblt\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.372087 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-config-data\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.372184 4650 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-public-tls-certs\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.475332 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-config-data\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.475760 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-public-tls-certs\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.475804 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-combined-ca-bundle\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.475841 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae091b31-cc44-44f4-a374-6373c9501292-logs\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.475858 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-scripts\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.475883 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-internal-tls-certs\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.475901 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbblt\" (UniqueName: \"kubernetes.io/projected/ae091b31-cc44-44f4-a374-6373c9501292-kube-api-access-cbblt\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.476544 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae091b31-cc44-44f4-a374-6373c9501292-logs\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.480789 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-public-tls-certs\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.484070 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-internal-tls-certs\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.489176 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-84j7d"] Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.489473 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-scripts\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.497555 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-84j7d"] Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.497952 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-config-data\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.510288 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbblt\" (UniqueName: \"kubernetes.io/projected/ae091b31-cc44-44f4-a374-6373c9501292-kube-api-access-cbblt\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.510574 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-combined-ca-bundle\") pod \"placement-87f9c5788-4s9lh\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:04 crc kubenswrapper[4650]: I0201 07:42:04.697134 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:05 crc kubenswrapper[4650]: I0201 07:42:05.152841 4650 generic.go:334] "Generic (PLEG): container finished" podID="b0b99236-33b9-4191-8139-8afbda8a3329" containerID="99550a12eafed3327c442223efb94cccfd671910766cf2c64f63138a238ccfb4" exitCode=0 Feb 01 07:42:05 crc kubenswrapper[4650]: I0201 07:42:05.152876 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-z4nfj" event={"ID":"b0b99236-33b9-4191-8139-8afbda8a3329","Type":"ContainerDied","Data":"99550a12eafed3327c442223efb94cccfd671910766cf2c64f63138a238ccfb4"} Feb 01 07:42:05 crc kubenswrapper[4650]: I0201 07:42:05.966378 4650 scope.go:117] "RemoveContainer" containerID="6504b725df1bfeb92243f472b8d0d3567369670c116e0b52078bea24ff64fa18" Feb 01 07:42:05 crc kubenswrapper[4650]: I0201 07:42:05.966447 4650 scope.go:117] "RemoveContainer" containerID="8c262c59779118f7852a08d8e244dfa36953485fce537dcd86580dc10cd0ba0b" Feb 01 07:42:05 crc kubenswrapper[4650]: I0201 07:42:05.966538 4650 scope.go:117] "RemoveContainer" containerID="b10079ec0287249a789ed330b62eed00b2c8d97c8f8b7f0b73fdae340eb33595" Feb 01 07:42:05 crc kubenswrapper[4650]: I0201 07:42:05.977779 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfb53357-604b-407e-8577-36288efeda68" path="/var/lib/kubelet/pods/dfb53357-604b-407e-8577-36288efeda68/volumes" Feb 01 07:42:07 crc kubenswrapper[4650]: I0201 07:42:07.161728 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:42:07 crc kubenswrapper[4650]: I0201 07:42:07.162275 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:42:07 crc kubenswrapper[4650]: I0201 07:42:07.828984 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 01 07:42:07 crc kubenswrapper[4650]: I0201 07:42:07.829055 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 01 07:42:07 crc kubenswrapper[4650]: I0201 07:42:07.878923 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 01 07:42:07 crc kubenswrapper[4650]: I0201 07:42:07.879923 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 01 07:42:08 crc kubenswrapper[4650]: I0201 07:42:08.177596 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 01 07:42:08 crc kubenswrapper[4650]: I0201 07:42:08.177623 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.254739 4650 scope.go:117] "RemoveContainer" containerID="fe03c09d32473e68729d1713a4e64b958a1db007ebdfe252ecc081294bbbf6b0" Feb 01 07:42:09 crc kubenswrapper[4650]: E0201 07:42:09.255816 4650 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"fe03c09d32473e68729d1713a4e64b958a1db007ebdfe252ecc081294bbbf6b0\": container with ID starting with fe03c09d32473e68729d1713a4e64b958a1db007ebdfe252ecc081294bbbf6b0 not found: ID does not exist" containerID="fe03c09d32473e68729d1713a4e64b958a1db007ebdfe252ecc081294bbbf6b0" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.255871 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe03c09d32473e68729d1713a4e64b958a1db007ebdfe252ecc081294bbbf6b0"} err="failed to get container status \"fe03c09d32473e68729d1713a4e64b958a1db007ebdfe252ecc081294bbbf6b0\": rpc error: code = NotFound desc = could not find container \"fe03c09d32473e68729d1713a4e64b958a1db007ebdfe252ecc081294bbbf6b0\": container with ID starting with fe03c09d32473e68729d1713a4e64b958a1db007ebdfe252ecc081294bbbf6b0 not found: ID does not exist" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.255902 4650 scope.go:117] "RemoveContainer" containerID="f57ce8578ac28782d75e21e796d1f228046a9fe8f81dc9ea69507661b03c5a1d" Feb 01 07:42:09 crc kubenswrapper[4650]: E0201 07:42:09.256382 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f57ce8578ac28782d75e21e796d1f228046a9fe8f81dc9ea69507661b03c5a1d\": container with ID starting with f57ce8578ac28782d75e21e796d1f228046a9fe8f81dc9ea69507661b03c5a1d not found: ID does not exist" containerID="f57ce8578ac28782d75e21e796d1f228046a9fe8f81dc9ea69507661b03c5a1d" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.256415 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f57ce8578ac28782d75e21e796d1f228046a9fe8f81dc9ea69507661b03c5a1d"} err="failed to get container status \"f57ce8578ac28782d75e21e796d1f228046a9fe8f81dc9ea69507661b03c5a1d\": rpc error: code = NotFound desc = could not find container \"f57ce8578ac28782d75e21e796d1f228046a9fe8f81dc9ea69507661b03c5a1d\": container with ID starting with f57ce8578ac28782d75e21e796d1f228046a9fe8f81dc9ea69507661b03c5a1d not found: ID does not exist" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.542216 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.677217 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-fernet-keys\") pod \"b0b99236-33b9-4191-8139-8afbda8a3329\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.677259 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdqgc\" (UniqueName: \"kubernetes.io/projected/b0b99236-33b9-4191-8139-8afbda8a3329-kube-api-access-vdqgc\") pod \"b0b99236-33b9-4191-8139-8afbda8a3329\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.677312 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-scripts\") pod \"b0b99236-33b9-4191-8139-8afbda8a3329\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.677337 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-config-data\") pod \"b0b99236-33b9-4191-8139-8afbda8a3329\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.677419 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-credential-keys\") pod \"b0b99236-33b9-4191-8139-8afbda8a3329\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.677455 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-combined-ca-bundle\") pod \"b0b99236-33b9-4191-8139-8afbda8a3329\" (UID: \"b0b99236-33b9-4191-8139-8afbda8a3329\") " Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.687536 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b0b99236-33b9-4191-8139-8afbda8a3329" (UID: "b0b99236-33b9-4191-8139-8afbda8a3329"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.688256 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0b99236-33b9-4191-8139-8afbda8a3329-kube-api-access-vdqgc" (OuterVolumeSpecName: "kube-api-access-vdqgc") pod "b0b99236-33b9-4191-8139-8afbda8a3329" (UID: "b0b99236-33b9-4191-8139-8afbda8a3329"). InnerVolumeSpecName "kube-api-access-vdqgc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.689098 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b0b99236-33b9-4191-8139-8afbda8a3329" (UID: "b0b99236-33b9-4191-8139-8afbda8a3329"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.701190 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-scripts" (OuterVolumeSpecName: "scripts") pod "b0b99236-33b9-4191-8139-8afbda8a3329" (UID: "b0b99236-33b9-4191-8139-8afbda8a3329"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.715671 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b0b99236-33b9-4191-8139-8afbda8a3329" (UID: "b0b99236-33b9-4191-8139-8afbda8a3329"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.744076 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-config-data" (OuterVolumeSpecName: "config-data") pod "b0b99236-33b9-4191-8139-8afbda8a3329" (UID: "b0b99236-33b9-4191-8139-8afbda8a3329"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.780807 4650 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-credential-keys\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.780838 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.780846 4650 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-fernet-keys\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.780854 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdqgc\" (UniqueName: \"kubernetes.io/projected/b0b99236-33b9-4191-8139-8afbda8a3329-kube-api-access-vdqgc\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.780867 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.780875 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0b99236-33b9-4191-8139-8afbda8a3329-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:09 crc kubenswrapper[4650]: I0201 07:42:09.882680 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-87f9c5788-4s9lh"] Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.239090 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-87f9c5788-4s9lh" event={"ID":"ae091b31-cc44-44f4-a374-6373c9501292","Type":"ContainerStarted","Data":"dd45ddcb48430dde063fa9684da7e72be8afb55c3933f96efd717d57199810f0"} Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.264668 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-db-sync-kzjnq" event={"ID":"2208b1dc-dbac-498a-a760-21257b722e80","Type":"ContainerStarted","Data":"56ccf51c02b67d917b039a07a1d5a5a8ac5d69569a9595db4ffe6c390d10bd0e"} Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.296695 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b243d67e-b432-4b66-aa65-05cdbc100cb7","Type":"ContainerStarted","Data":"85595216f4289a1ec765699bd7fe57174cf0eed41e79b56b03aa606b0d164083"} Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.303969 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-kzjnq" podStartSLOduration=3.481113722 podStartE2EDuration="56.303917495s" podCreationTimestamp="2026-02-01 07:41:14 +0000 UTC" firstStartedPulling="2026-02-01 07:41:16.697662504 +0000 UTC m=+1075.420760749" lastFinishedPulling="2026-02-01 07:42:09.520466267 +0000 UTC m=+1128.243564522" observedRunningTime="2026-02-01 07:42:10.283367039 +0000 UTC m=+1129.006465284" watchObservedRunningTime="2026-02-01 07:42:10.303917495 +0000 UTC m=+1129.027015740" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.337479 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31"} Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.337523 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492"} Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.347882 4650 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.347903 4650 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.348278 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-z4nfj" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.348618 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-z4nfj" event={"ID":"b0b99236-33b9-4191-8139-8afbda8a3329","Type":"ContainerDied","Data":"e4f022ddfdbdf21edede9cc3d45f026e5cfa5544faac89746b88c86202777a01"} Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.348641 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4f022ddfdbdf21edede9cc3d45f026e5cfa5544faac89746b88c86202777a01" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.507781 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.507892 4650 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.567691 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.802816 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-b4f94b5c6-zjcnl"] Feb 01 07:42:10 crc kubenswrapper[4650]: E0201 07:42:10.817710 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0b99236-33b9-4191-8139-8afbda8a3329" containerName="keystone-bootstrap" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.817735 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0b99236-33b9-4191-8139-8afbda8a3329" containerName="keystone-bootstrap" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.817939 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0b99236-33b9-4191-8139-8afbda8a3329" containerName="keystone-bootstrap" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.818520 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.822912 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-b4f94b5c6-zjcnl"] Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.831881 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.832151 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8zg69" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.832398 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.832472 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.832413 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.832651 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.914158 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-config-data\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.914249 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-combined-ca-bundle\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.914291 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-internal-tls-certs\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.914340 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-scripts\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.914382 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-public-tls-certs\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.914457 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-credential-keys\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: 
\"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.914498 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxkxl\" (UniqueName: \"kubernetes.io/projected/b99a5b57-fa0e-464e-a115-4afe6f30f193-kube-api-access-gxkxl\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:10 crc kubenswrapper[4650]: I0201 07:42:10.914550 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-fernet-keys\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.016183 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-combined-ca-bundle\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.016496 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-internal-tls-certs\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.016600 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-scripts\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.016677 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-public-tls-certs\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.016784 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-credential-keys\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.016873 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxkxl\" (UniqueName: \"kubernetes.io/projected/b99a5b57-fa0e-464e-a115-4afe6f30f193-kube-api-access-gxkxl\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.016953 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-fernet-keys\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" 
Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.017087 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-config-data\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.022551 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-credential-keys\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.022604 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-internal-tls-certs\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.023607 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-public-tls-certs\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.025604 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-config-data\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.030856 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-fernet-keys\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.031391 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-combined-ca-bundle\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.031670 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b99a5b57-fa0e-464e-a115-4afe6f30f193-scripts\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.043707 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxkxl\" (UniqueName: \"kubernetes.io/projected/b99a5b57-fa0e-464e-a115-4afe6f30f193-kube-api-access-gxkxl\") pod \"keystone-b4f94b5c6-zjcnl\" (UID: \"b99a5b57-fa0e-464e-a115-4afe6f30f193\") " pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.143842 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.384194 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31" exitCode=1 Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.384618 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492" exitCode=1 Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.384657 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31"} Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.384681 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492"} Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.384690 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"0762ec2515f934e543828087282d638c3ace8afd252c27ec2209aca61ed63e83"} Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.384705 4650 scope.go:117] "RemoveContainer" containerID="8c262c59779118f7852a08d8e244dfa36953485fce537dcd86580dc10cd0ba0b" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.385413 4650 scope.go:117] "RemoveContainer" containerID="b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.385474 4650 scope.go:117] "RemoveContainer" containerID="dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31" Feb 01 07:42:11 crc kubenswrapper[4650]: E0201 07:42:11.385842 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.392500 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-87f9c5788-4s9lh" event={"ID":"ae091b31-cc44-44f4-a374-6373c9501292","Type":"ContainerStarted","Data":"16e1f0a1172f0b931837eee6c283685a5230303871b51476c961200d508c6bd7"} Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.392533 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-87f9c5788-4s9lh" event={"ID":"ae091b31-cc44-44f4-a374-6373c9501292","Type":"ContainerStarted","Data":"1b32ba94858cc0c3a1436431907746a3cd185a00b7f2d436e309e29710dc850e"} Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.393356 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.393383 4650 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.400135 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-x99cv" event={"ID":"00154668-79cc-4c4d-81f9-e7975168f700","Type":"ContainerStarted","Data":"3f593a11994ea5a5c968f9dfb5d93197e2f4bbb0e46afaf85e0d37a73181b3d5"} Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.477643 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-x99cv" podStartSLOduration=4.908887083 podStartE2EDuration="58.477624278s" podCreationTimestamp="2026-02-01 07:41:13 +0000 UTC" firstStartedPulling="2026-02-01 07:41:15.927621413 +0000 UTC m=+1074.650719658" lastFinishedPulling="2026-02-01 07:42:09.496358618 +0000 UTC m=+1128.219456853" observedRunningTime="2026-02-01 07:42:11.476191251 +0000 UTC m=+1130.199289496" watchObservedRunningTime="2026-02-01 07:42:11.477624278 +0000 UTC m=+1130.200722523" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.504378 4650 scope.go:117] "RemoveContainer" containerID="6504b725df1bfeb92243f472b8d0d3567369670c116e0b52078bea24ff64fa18" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.596156 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.596264 4650 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.644117 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-87f9c5788-4s9lh" podStartSLOduration=7.644091489 podStartE2EDuration="7.644091489s" podCreationTimestamp="2026-02-01 07:42:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:42:11.522213381 +0000 UTC m=+1130.245311636" watchObservedRunningTime="2026-02-01 07:42:11.644091489 +0000 UTC m=+1130.367189734" Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.765642 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-b4f94b5c6-zjcnl"] Feb 01 07:42:11 crc kubenswrapper[4650]: I0201 07:42:11.979557 4650 scope.go:117] "RemoveContainer" containerID="6f8c8dc1eda654014f9579a906ac62edf2663c5e2ac13f6a093be99a40d08ffd" Feb 01 07:42:12 crc kubenswrapper[4650]: I0201 07:42:12.411312 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-85ff8d5c86-hrgmh_d8cab23a-57a2-432e-9aa8-1ffc44434d58/neutron-httpd/1.log" Feb 01 07:42:12 crc kubenswrapper[4650]: I0201 07:42:12.412577 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85ff8d5c86-hrgmh" event={"ID":"d8cab23a-57a2-432e-9aa8-1ffc44434d58","Type":"ContainerStarted","Data":"4eeac89e42f7d84ac549f5e4414dbc7ad5341ac21121b624c78eb8dcc1fba363"} Feb 01 07:42:12 crc kubenswrapper[4650]: I0201 07:42:12.412819 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:42:12 crc kubenswrapper[4650]: I0201 07:42:12.417453 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b4f94b5c6-zjcnl" event={"ID":"b99a5b57-fa0e-464e-a115-4afe6f30f193","Type":"ContainerStarted","Data":"c2b3a18f1028afb14b4110f4e2e97bbf492c70f2f06a4f2d872d9113ddefe0ce"} Feb 01 07:42:12 crc kubenswrapper[4650]: I0201 07:42:12.417504 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-b4f94b5c6-zjcnl" event={"ID":"b99a5b57-fa0e-464e-a115-4afe6f30f193","Type":"ContainerStarted","Data":"69966d29553c58bea114c63685fc54dd927af52a7ff63464ad34f2f72d9e7cee"} Feb 01 07:42:12 crc kubenswrapper[4650]: I0201 07:42:12.417576 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:12 crc kubenswrapper[4650]: I0201 07:42:12.427456 4650 scope.go:117] "RemoveContainer" containerID="b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492" Feb 01 07:42:12 crc kubenswrapper[4650]: I0201 07:42:12.427523 4650 scope.go:117] "RemoveContainer" containerID="dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31" Feb 01 07:42:12 crc kubenswrapper[4650]: E0201 07:42:12.427870 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:42:12 crc kubenswrapper[4650]: I0201 07:42:12.493416 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-b4f94b5c6-zjcnl" podStartSLOduration=2.493398514 podStartE2EDuration="2.493398514s" podCreationTimestamp="2026-02-01 07:42:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:42:12.49135076 +0000 UTC m=+1131.214449015" watchObservedRunningTime="2026-02-01 07:42:12.493398514 +0000 UTC m=+1131.216496759" Feb 01 07:42:12 crc kubenswrapper[4650]: I0201 07:42:12.799328 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 01 07:42:13 crc kubenswrapper[4650]: I0201 07:42:13.465850 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-85ff8d5c86-hrgmh_d8cab23a-57a2-432e-9aa8-1ffc44434d58/neutron-httpd/2.log" Feb 01 07:42:13 crc kubenswrapper[4650]: I0201 07:42:13.467010 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-85ff8d5c86-hrgmh_d8cab23a-57a2-432e-9aa8-1ffc44434d58/neutron-httpd/1.log" Feb 01 07:42:13 crc kubenswrapper[4650]: I0201 07:42:13.467998 4650 generic.go:334] "Generic (PLEG): container finished" podID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerID="4eeac89e42f7d84ac549f5e4414dbc7ad5341ac21121b624c78eb8dcc1fba363" exitCode=1 Feb 01 07:42:13 crc kubenswrapper[4650]: I0201 07:42:13.468109 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85ff8d5c86-hrgmh" event={"ID":"d8cab23a-57a2-432e-9aa8-1ffc44434d58","Type":"ContainerDied","Data":"4eeac89e42f7d84ac549f5e4414dbc7ad5341ac21121b624c78eb8dcc1fba363"} Feb 01 07:42:13 crc kubenswrapper[4650]: I0201 07:42:13.468156 4650 scope.go:117] "RemoveContainer" containerID="6f8c8dc1eda654014f9579a906ac62edf2663c5e2ac13f6a093be99a40d08ffd" Feb 01 07:42:13 crc kubenswrapper[4650]: I0201 07:42:13.468676 4650 scope.go:117] "RemoveContainer" containerID="4eeac89e42f7d84ac549f5e4414dbc7ad5341ac21121b624c78eb8dcc1fba363" Feb 01 07:42:13 crc kubenswrapper[4650]: E0201 07:42:13.468882 4650 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"neutron-httpd\" with CrashLoopBackOff: \"back-off 20s restarting failed container=neutron-httpd pod=neutron-85ff8d5c86-hrgmh_openstack(d8cab23a-57a2-432e-9aa8-1ffc44434d58)\"" pod="openstack/neutron-85ff8d5c86-hrgmh" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" Feb 01 07:42:13 crc kubenswrapper[4650]: I0201 07:42:13.511469 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="0762ec2515f934e543828087282d638c3ace8afd252c27ec2209aca61ed63e83" exitCode=1 Feb 01 07:42:13 crc kubenswrapper[4650]: I0201 07:42:13.512371 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"0762ec2515f934e543828087282d638c3ace8afd252c27ec2209aca61ed63e83"} Feb 01 07:42:13 crc kubenswrapper[4650]: I0201 07:42:13.512781 4650 scope.go:117] "RemoveContainer" containerID="b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492" Feb 01 07:42:13 crc kubenswrapper[4650]: I0201 07:42:13.512842 4650 scope.go:117] "RemoveContainer" containerID="dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31" Feb 01 07:42:13 crc kubenswrapper[4650]: I0201 07:42:13.512943 4650 scope.go:117] "RemoveContainer" containerID="0762ec2515f934e543828087282d638c3ace8afd252c27ec2209aca61ed63e83" Feb 01 07:42:13 crc kubenswrapper[4650]: E0201 07:42:13.513216 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:42:13 crc kubenswrapper[4650]: I0201 07:42:13.627209 4650 scope.go:117] "RemoveContainer" containerID="b10079ec0287249a789ed330b62eed00b2c8d97c8f8b7f0b73fdae340eb33595" Feb 01 07:42:14 crc kubenswrapper[4650]: I0201 07:42:14.017247 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5b4d45c6bd-qsdbt" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.150:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.150:8443: connect: connection refused" Feb 01 07:42:14 crc kubenswrapper[4650]: I0201 07:42:14.135832 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-79fd8b5f84-qg9cv" podUID="9c4bad14-279f-4212-a86d-cea1c9fe7b48" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Feb 01 07:42:14 crc kubenswrapper[4650]: I0201 07:42:14.523320 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-85ff8d5c86-hrgmh_d8cab23a-57a2-432e-9aa8-1ffc44434d58/neutron-httpd/2.log" Feb 01 07:42:14 crc kubenswrapper[4650]: I0201 07:42:14.526687 4650 scope.go:117] 
"RemoveContainer" containerID="4eeac89e42f7d84ac549f5e4414dbc7ad5341ac21121b624c78eb8dcc1fba363" Feb 01 07:42:14 crc kubenswrapper[4650]: E0201 07:42:14.527017 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"neutron-httpd\" with CrashLoopBackOff: \"back-off 20s restarting failed container=neutron-httpd pod=neutron-85ff8d5c86-hrgmh_openstack(d8cab23a-57a2-432e-9aa8-1ffc44434d58)\"" pod="openstack/neutron-85ff8d5c86-hrgmh" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" Feb 01 07:42:17 crc kubenswrapper[4650]: I0201 07:42:17.576470 4650 generic.go:334] "Generic (PLEG): container finished" podID="2208b1dc-dbac-498a-a760-21257b722e80" containerID="56ccf51c02b67d917b039a07a1d5a5a8ac5d69569a9595db4ffe6c390d10bd0e" exitCode=0 Feb 01 07:42:17 crc kubenswrapper[4650]: I0201 07:42:17.576575 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-kzjnq" event={"ID":"2208b1dc-dbac-498a-a760-21257b722e80","Type":"ContainerDied","Data":"56ccf51c02b67d917b039a07a1d5a5a8ac5d69569a9595db4ffe6c390d10bd0e"} Feb 01 07:42:21 crc kubenswrapper[4650]: I0201 07:42:21.623561 4650 generic.go:334] "Generic (PLEG): container finished" podID="00154668-79cc-4c4d-81f9-e7975168f700" containerID="3f593a11994ea5a5c968f9dfb5d93197e2f4bbb0e46afaf85e0d37a73181b3d5" exitCode=0 Feb 01 07:42:21 crc kubenswrapper[4650]: I0201 07:42:21.623654 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-x99cv" event={"ID":"00154668-79cc-4c4d-81f9-e7975168f700","Type":"ContainerDied","Data":"3f593a11994ea5a5c968f9dfb5d93197e2f4bbb0e46afaf85e0d37a73181b3d5"} Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.240198 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-kzjnq" Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.363093 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2208b1dc-dbac-498a-a760-21257b722e80-db-sync-config-data\") pod \"2208b1dc-dbac-498a-a760-21257b722e80\" (UID: \"2208b1dc-dbac-498a-a760-21257b722e80\") " Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.363145 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vr6s\" (UniqueName: \"kubernetes.io/projected/2208b1dc-dbac-498a-a760-21257b722e80-kube-api-access-6vr6s\") pod \"2208b1dc-dbac-498a-a760-21257b722e80\" (UID: \"2208b1dc-dbac-498a-a760-21257b722e80\") " Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.363235 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2208b1dc-dbac-498a-a760-21257b722e80-combined-ca-bundle\") pod \"2208b1dc-dbac-498a-a760-21257b722e80\" (UID: \"2208b1dc-dbac-498a-a760-21257b722e80\") " Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.369175 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2208b1dc-dbac-498a-a760-21257b722e80-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "2208b1dc-dbac-498a-a760-21257b722e80" (UID: "2208b1dc-dbac-498a-a760-21257b722e80"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.372378 4650 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2208b1dc-dbac-498a-a760-21257b722e80-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.382153 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2208b1dc-dbac-498a-a760-21257b722e80-kube-api-access-6vr6s" (OuterVolumeSpecName: "kube-api-access-6vr6s") pod "2208b1dc-dbac-498a-a760-21257b722e80" (UID: "2208b1dc-dbac-498a-a760-21257b722e80"). InnerVolumeSpecName "kube-api-access-6vr6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.416302 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2208b1dc-dbac-498a-a760-21257b722e80-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2208b1dc-dbac-498a-a760-21257b722e80" (UID: "2208b1dc-dbac-498a-a760-21257b722e80"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.473850 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vr6s\" (UniqueName: \"kubernetes.io/projected/2208b1dc-dbac-498a-a760-21257b722e80-kube-api-access-6vr6s\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.474154 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2208b1dc-dbac-498a-a760-21257b722e80-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.632971 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-kzjnq" event={"ID":"2208b1dc-dbac-498a-a760-21257b722e80","Type":"ContainerDied","Data":"dbb2991e778e7379c7450119910b74520c3806948faea3525a6bd5408f81559e"} Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.633020 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dbb2991e778e7379c7450119910b74520c3806948faea3525a6bd5408f81559e" Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.633054 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-kzjnq" Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.636323 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b243d67e-b432-4b66-aa65-05cdbc100cb7","Type":"ContainerStarted","Data":"ad42355a134d7477668342e74df4ce08c5c1535c48ba45ee58f5937d2ccbe086"} Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.636532 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="ceilometer-central-agent" containerID="cri-o://c727b8fe9386e32e0f907f7dd32d6dc2f2f2b3f3323c8ca2015c7aa290fbab08" gracePeriod=30 Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.636544 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="sg-core" containerID="cri-o://85595216f4289a1ec765699bd7fe57174cf0eed41e79b56b03aa606b0d164083" gracePeriod=30 Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.636597 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="ceilometer-notification-agent" containerID="cri-o://e8583e40abe46d8cdc8a77461093489f5ae34b53d356d113956d6178480920dc" gracePeriod=30 Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.636837 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="proxy-httpd" containerID="cri-o://ad42355a134d7477668342e74df4ce08c5c1535c48ba45ee58f5937d2ccbe086" gracePeriod=30 Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.667463 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.079460349 podStartE2EDuration="1m8.667444822s" podCreationTimestamp="2026-02-01 07:41:14 +0000 UTC" firstStartedPulling="2026-02-01 07:41:16.697987952 +0000 UTC m=+1075.421086197" lastFinishedPulling="2026-02-01 07:42:22.285972425 +0000 UTC m=+1141.009070670" observedRunningTime="2026-02-01 07:42:22.660506701 +0000 UTC m=+1141.383604946" watchObservedRunningTime="2026-02-01 07:42:22.667444822 +0000 UTC m=+1141.390543067" Feb 01 07:42:22 crc kubenswrapper[4650]: I0201 07:42:22.988706 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-x99cv" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.097644 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/00154668-79cc-4c4d-81f9-e7975168f700-etc-machine-id\") pod \"00154668-79cc-4c4d-81f9-e7975168f700\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.097916 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdsvl\" (UniqueName: \"kubernetes.io/projected/00154668-79cc-4c4d-81f9-e7975168f700-kube-api-access-jdsvl\") pod \"00154668-79cc-4c4d-81f9-e7975168f700\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.097939 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-combined-ca-bundle\") pod \"00154668-79cc-4c4d-81f9-e7975168f700\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.097785 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/00154668-79cc-4c4d-81f9-e7975168f700-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "00154668-79cc-4c4d-81f9-e7975168f700" (UID: "00154668-79cc-4c4d-81f9-e7975168f700"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.097985 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-db-sync-config-data\") pod \"00154668-79cc-4c4d-81f9-e7975168f700\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.098009 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-scripts\") pod \"00154668-79cc-4c4d-81f9-e7975168f700\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.098153 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-config-data\") pod \"00154668-79cc-4c4d-81f9-e7975168f700\" (UID: \"00154668-79cc-4c4d-81f9-e7975168f700\") " Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.098818 4650 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/00154668-79cc-4c4d-81f9-e7975168f700-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.103389 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "00154668-79cc-4c4d-81f9-e7975168f700" (UID: "00154668-79cc-4c4d-81f9-e7975168f700"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.105269 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00154668-79cc-4c4d-81f9-e7975168f700-kube-api-access-jdsvl" (OuterVolumeSpecName: "kube-api-access-jdsvl") pod "00154668-79cc-4c4d-81f9-e7975168f700" (UID: "00154668-79cc-4c4d-81f9-e7975168f700"). InnerVolumeSpecName "kube-api-access-jdsvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.113149 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-scripts" (OuterVolumeSpecName: "scripts") pod "00154668-79cc-4c4d-81f9-e7975168f700" (UID: "00154668-79cc-4c4d-81f9-e7975168f700"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.135177 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "00154668-79cc-4c4d-81f9-e7975168f700" (UID: "00154668-79cc-4c4d-81f9-e7975168f700"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.154145 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-config-data" (OuterVolumeSpecName: "config-data") pod "00154668-79cc-4c4d-81f9-e7975168f700" (UID: "00154668-79cc-4c4d-81f9-e7975168f700"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.200759 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.200795 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdsvl\" (UniqueName: \"kubernetes.io/projected/00154668-79cc-4c4d-81f9-e7975168f700-kube-api-access-jdsvl\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.200819 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.200830 4650 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.200839 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00154668-79cc-4c4d-81f9-e7975168f700-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.212522 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.213217 4650 scope.go:117] "RemoveContainer" containerID="4eeac89e42f7d84ac549f5e4414dbc7ad5341ac21121b624c78eb8dcc1fba363" Feb 01 07:42:23 crc kubenswrapper[4650]: E0201 
07:42:23.213392 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"neutron-httpd\" with CrashLoopBackOff: \"back-off 20s restarting failed container=neutron-httpd pod=neutron-85ff8d5c86-hrgmh_openstack(d8cab23a-57a2-432e-9aa8-1ffc44434d58)\"" pod="openstack/neutron-85ff8d5c86-hrgmh" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.216930 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-85ff8d5c86-hrgmh" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerName="neutron-api" probeResult="failure" output="Get \"http://10.217.0.155:9696/\": dial tcp 10.217.0.155:9696: connect: connection refused" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.558201 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-595fb9c59d-bnj8v"] Feb 01 07:42:23 crc kubenswrapper[4650]: E0201 07:42:23.558580 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2208b1dc-dbac-498a-a760-21257b722e80" containerName="barbican-db-sync" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.558592 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="2208b1dc-dbac-498a-a760-21257b722e80" containerName="barbican-db-sync" Feb 01 07:42:23 crc kubenswrapper[4650]: E0201 07:42:23.558606 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00154668-79cc-4c4d-81f9-e7975168f700" containerName="cinder-db-sync" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.558612 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="00154668-79cc-4c4d-81f9-e7975168f700" containerName="cinder-db-sync" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.558780 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="00154668-79cc-4c4d-81f9-e7975168f700" containerName="cinder-db-sync" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.558796 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="2208b1dc-dbac-498a-a760-21257b722e80" containerName="barbican-db-sync" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.559647 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.604150 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.605045 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-bkmh9" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.605623 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.616686 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-8466dd5d47-jv8ww"] Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.618143 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.630979 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.645244 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-595fb9c59d-bnj8v"] Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.655782 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-x99cv" event={"ID":"00154668-79cc-4c4d-81f9-e7975168f700","Type":"ContainerDied","Data":"a26dd22ebba254fe4783437cb51de623db7cbc3fdf767d809ecbdf98686b361c"} Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.655827 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a26dd22ebba254fe4783437cb51de623db7cbc3fdf767d809ecbdf98686b361c" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.655896 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-x99cv" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.665946 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-8466dd5d47-jv8ww"] Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.682409 4650 generic.go:334] "Generic (PLEG): container finished" podID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerID="ad42355a134d7477668342e74df4ce08c5c1535c48ba45ee58f5937d2ccbe086" exitCode=0 Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.682438 4650 generic.go:334] "Generic (PLEG): container finished" podID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerID="85595216f4289a1ec765699bd7fe57174cf0eed41e79b56b03aa606b0d164083" exitCode=2 Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.682444 4650 generic.go:334] "Generic (PLEG): container finished" podID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerID="c727b8fe9386e32e0f907f7dd32d6dc2f2f2b3f3323c8ca2015c7aa290fbab08" exitCode=0 Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.682465 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b243d67e-b432-4b66-aa65-05cdbc100cb7","Type":"ContainerDied","Data":"ad42355a134d7477668342e74df4ce08c5c1535c48ba45ee58f5937d2ccbe086"} Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.682488 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b243d67e-b432-4b66-aa65-05cdbc100cb7","Type":"ContainerDied","Data":"85595216f4289a1ec765699bd7fe57174cf0eed41e79b56b03aa606b0d164083"} Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.682498 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b243d67e-b432-4b66-aa65-05cdbc100cb7","Type":"ContainerDied","Data":"c727b8fe9386e32e0f907f7dd32d6dc2f2f2b3f3323c8ca2015c7aa290fbab08"} Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.708308 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ac48219-ace9-4dec-a04a-c710e730a1d4-config-data-custom\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.708367 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ac48219-ace9-4dec-a04a-c710e730a1d4-config-data\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.708385 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ac48219-ace9-4dec-a04a-c710e730a1d4-logs\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.708410 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mggnb\" (UniqueName: \"kubernetes.io/projected/4ac48219-ace9-4dec-a04a-c710e730a1d4-kube-api-access-mggnb\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.708442 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-config-data\") pod \"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.708460 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-logs\") pod \"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.708485 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ac48219-ace9-4dec-a04a-c710e730a1d4-combined-ca-bundle\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.708503 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t9cm\" (UniqueName: \"kubernetes.io/projected/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-kube-api-access-7t9cm\") pod \"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.708567 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-config-data-custom\") pod \"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.708585 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-combined-ca-bundle\") pod 
\"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.775375 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-798d46d59c-f9x7p"] Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.776677 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.810180 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mggnb\" (UniqueName: \"kubernetes.io/projected/4ac48219-ace9-4dec-a04a-c710e730a1d4-kube-api-access-mggnb\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.810234 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-config-data\") pod \"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.818186 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-logs\") pod \"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.818573 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-logs\") pod \"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.818666 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ac48219-ace9-4dec-a04a-c710e730a1d4-combined-ca-bundle\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.818694 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t9cm\" (UniqueName: \"kubernetes.io/projected/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-kube-api-access-7t9cm\") pod \"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.818882 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-config-data-custom\") pod \"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.818908 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-combined-ca-bundle\") pod 
\"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.818950 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ac48219-ace9-4dec-a04a-c710e730a1d4-config-data-custom\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.819057 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ac48219-ace9-4dec-a04a-c710e730a1d4-config-data\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.819079 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ac48219-ace9-4dec-a04a-c710e730a1d4-logs\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.819378 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ac48219-ace9-4dec-a04a-c710e730a1d4-logs\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.819607 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-798d46d59c-f9x7p"] Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.831721 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ac48219-ace9-4dec-a04a-c710e730a1d4-combined-ca-bundle\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.836870 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ac48219-ace9-4dec-a04a-c710e730a1d4-config-data-custom\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.837411 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-combined-ca-bundle\") pod \"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.838375 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ac48219-ace9-4dec-a04a-c710e730a1d4-config-data\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" 
Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.838936 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-config-data-custom\") pod \"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.839303 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-config-data\") pod \"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.896560 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mggnb\" (UniqueName: \"kubernetes.io/projected/4ac48219-ace9-4dec-a04a-c710e730a1d4-kube-api-access-mggnb\") pod \"barbican-keystone-listener-595fb9c59d-bnj8v\" (UID: \"4ac48219-ace9-4dec-a04a-c710e730a1d4\") " pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.911606 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t9cm\" (UniqueName: \"kubernetes.io/projected/8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa-kube-api-access-7t9cm\") pod \"barbican-worker-8466dd5d47-jv8ww\" (UID: \"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa\") " pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.921895 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-config\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.921951 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds8hh\" (UniqueName: \"kubernetes.io/projected/510ee4f3-4c57-44cf-8e18-32b303ace963-kube-api-access-ds8hh\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.922035 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-dns-svc\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.922082 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-ovsdbserver-sb\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.922125 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-ovsdbserver-nb\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: 
\"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:23 crc kubenswrapper[4650]: I0201 07:42:23.946970 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-8466dd5d47-jv8ww" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.016914 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5b4d45c6bd-qsdbt" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.150:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.150:8443: connect: connection refused" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.023197 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-ovsdbserver-sb\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.023268 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-ovsdbserver-nb\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.023311 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-config\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.023340 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds8hh\" (UniqueName: \"kubernetes.io/projected/510ee4f3-4c57-44cf-8e18-32b303ace963-kube-api-access-ds8hh\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.023399 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-dns-svc\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.024333 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-dns-svc\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.024878 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-ovsdbserver-sb\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.025692 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-config\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.026457 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-ovsdbserver-nb\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.037457 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7dcc97964d-q5ws4"] Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.046153 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.072211 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.107904 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds8hh\" (UniqueName: \"kubernetes.io/projected/510ee4f3-4c57-44cf-8e18-32b303ace963-kube-api-access-ds8hh\") pod \"dnsmasq-dns-798d46d59c-f9x7p\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.120467 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.132207 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-config-data\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.132307 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-config-data-custom\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.132335 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwgvd\" (UniqueName: \"kubernetes.io/projected/60db3db0-16a4-4f77-bbe2-cd46c8b70039-kube-api-access-wwgvd\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.132430 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-combined-ca-bundle\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.132468 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/60db3db0-16a4-4f77-bbe2-cd46c8b70039-logs\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.143483 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7dcc97964d-q5ws4"] Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.144070 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-79fd8b5f84-qg9cv" podUID="9c4bad14-279f-4212-a86d-cea1c9fe7b48" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.196520 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.235811 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-combined-ca-bundle\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.235873 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/60db3db0-16a4-4f77-bbe2-cd46c8b70039-logs\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.235920 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-config-data\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.235992 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-config-data-custom\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.236013 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwgvd\" (UniqueName: \"kubernetes.io/projected/60db3db0-16a4-4f77-bbe2-cd46c8b70039-kube-api-access-wwgvd\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.237113 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/60db3db0-16a4-4f77-bbe2-cd46c8b70039-logs\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.280495 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-combined-ca-bundle\") pod 
\"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.293999 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-config-data-custom\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.327393 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-config-data\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.358072 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwgvd\" (UniqueName: \"kubernetes.io/projected/60db3db0-16a4-4f77-bbe2-cd46c8b70039-kube-api-access-wwgvd\") pod \"barbican-api-7dcc97964d-q5ws4\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.367476 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.453822 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.455583 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.491623 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.491705 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-rtbt8" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.491867 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.491979 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.552829 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftrfv\" (UniqueName: \"kubernetes.io/projected/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-kube-api-access-ftrfv\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.552898 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.552918 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-combined-ca-bundle\") pod 
\"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.552968 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.553013 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-config-data\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.553057 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-scripts\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.563520 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.657677 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.658105 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-config-data\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.658152 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-scripts\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.658202 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftrfv\" (UniqueName: \"kubernetes.io/projected/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-kube-api-access-ftrfv\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.658248 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.658263 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: 
\"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.658418 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-798d46d59c-f9x7p"] Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.659719 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.665718 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-config-data\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.686139 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-scripts\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.688872 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.690355 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.720562 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftrfv\" (UniqueName: \"kubernetes.io/projected/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-kube-api-access-ftrfv\") pod \"cinder-scheduler-0\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.757758 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77c9c856fc-k7lkj"] Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.760372 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.787408 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.795410 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77c9c856fc-k7lkj"] Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.862176 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-ovsdbserver-nb\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.862236 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-ovsdbserver-sb\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.862320 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-config\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.862345 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-dns-svc\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.862378 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjz2c\" (UniqueName: \"kubernetes.io/projected/4f5171ae-f202-4f51-a05b-c13e7136959c-kube-api-access-hjz2c\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.937444 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.938955 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.942065 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.949230 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.964654 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-config\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.965513 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-dns-svc\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.965557 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjz2c\" (UniqueName: \"kubernetes.io/projected/4f5171ae-f202-4f51-a05b-c13e7136959c-kube-api-access-hjz2c\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.965608 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-ovsdbserver-nb\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.965646 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-ovsdbserver-sb\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.968457 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-ovsdbserver-nb\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.968666 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-dns-svc\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.968946 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-config\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.975750 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-ovsdbserver-sb\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:24 crc kubenswrapper[4650]: I0201 07:42:24.999167 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjz2c\" (UniqueName: \"kubernetes.io/projected/4f5171ae-f202-4f51-a05b-c13e7136959c-kube-api-access-hjz2c\") pod \"dnsmasq-dns-77c9c856fc-k7lkj\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.069987 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgm2c\" (UniqueName: \"kubernetes.io/projected/7467859e-a792-4959-bd51-d353099352bd-kube-api-access-cgm2c\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.070056 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7467859e-a792-4959-bd51-d353099352bd-logs\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.070128 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-config-data\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.070161 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-config-data-custom\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.070191 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-scripts\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.070214 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7467859e-a792-4959-bd51-d353099352bd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.070240 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.081930 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-8466dd5d47-jv8ww"] Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.122100 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.171958 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-config-data\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.172017 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-config-data-custom\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.172069 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-scripts\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.172086 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7467859e-a792-4959-bd51-d353099352bd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.172111 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.172160 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgm2c\" (UniqueName: \"kubernetes.io/projected/7467859e-a792-4959-bd51-d353099352bd-kube-api-access-cgm2c\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.172188 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7467859e-a792-4959-bd51-d353099352bd-logs\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.172574 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7467859e-a792-4959-bd51-d353099352bd-logs\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.173000 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7467859e-a792-4959-bd51-d353099352bd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.183414 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-config-data\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " 
pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.183841 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-config-data-custom\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.184353 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-scripts\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.219346 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgm2c\" (UniqueName: \"kubernetes.io/projected/7467859e-a792-4959-bd51-d353099352bd-kube-api-access-cgm2c\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.219959 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.259794 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.314253 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-798d46d59c-f9x7p"] Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.334614 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-595fb9c59d-bnj8v"] Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.447571 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7dcc97964d-q5ws4"] Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.507745 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.625958 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.652818 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-85ff8d5c86-hrgmh"] Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.653084 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-85ff8d5c86-hrgmh" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerName="neutron-api" containerID="cri-o://d3b2ef07e4b18ed02881615aefe34055c60d3300d0846ea348409e2dfae6c9ae" gracePeriod=30 Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.833861 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77c9c856fc-k7lkj"] Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.879332 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.919323 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3","Type":"ContainerStarted","Data":"7b01d43dc2ce165685eecdd7b86eab64d6e3bdf4f691306069955c9e5d2d6e81"} Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.925497 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7dcc97964d-q5ws4" event={"ID":"60db3db0-16a4-4f77-bbe2-cd46c8b70039","Type":"ContainerStarted","Data":"1fb375e1584c6381e07c347d90d8099ac3c9246a052c136ec773ed0d7a9300eb"} Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.952587 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" event={"ID":"510ee4f3-4c57-44cf-8e18-32b303ace963","Type":"ContainerStarted","Data":"0498fe6ffd00bec85569129879f0f60dcb5271bf75d74ef4846800ead9306f1d"} Feb 01 07:42:25 crc kubenswrapper[4650]: W0201 07:42:25.960417 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f5171ae_f202_4f51_a05b_c13e7136959c.slice/crio-6426e85560160582cd934b0deca0ccfa532a219a19a0f534940752af16302637 WatchSource:0}: Error finding container 6426e85560160582cd934b0deca0ccfa532a219a19a0f534940752af16302637: Status 404 returned error can't find the container with id 6426e85560160582cd934b0deca0ccfa532a219a19a0f534940752af16302637 Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.971216 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-d5c446bd9-7rlx2"] Feb 01 07:42:25 crc kubenswrapper[4650]: I0201 07:42:25.991753 4650 generic.go:334] "Generic (PLEG): container finished" podID="f3f97afc-40d4-4fc4-be00-1280202c0a31" containerID="92e7d65631ca2cdf85d94e108c984e0abaad1a4071ab0be5c98f694619635f10" exitCode=137 Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.016759 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-695d6f76c-qccxs" event={"ID":"f3f97afc-40d4-4fc4-be00-1280202c0a31","Type":"ContainerDied","Data":"92e7d65631ca2cdf85d94e108c984e0abaad1a4071ab0be5c98f694619635f10"} Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.019965 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.173801 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-d5c446bd9-7rlx2"] Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.173838 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-8466dd5d47-jv8ww" event={"ID":"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa","Type":"ContainerStarted","Data":"21484d22a55aa8140b75c8ae45cd97b01a9e24ea0817514ee54db0489eccae41"} Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.173853 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" event={"ID":"4ac48219-ace9-4dec-a04a-c710e730a1d4","Type":"ContainerStarted","Data":"6a6fbd4985c75162cfab9c9bc53bd975e3c58774ecaf01c13fc1094dce858341"} Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.183571 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwpv5\" (UniqueName: \"kubernetes.io/projected/810a735e-844d-434e-aa3f-7ac5421d1303-kube-api-access-rwpv5\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.183864 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-internal-tls-certs\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.183897 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-public-tls-certs\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.183915 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-ovndb-tls-certs\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.183951 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-httpd-config\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.183969 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-config\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.184018 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-combined-ca-bundle\") pod 
\"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.287521 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-internal-tls-certs\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.287588 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-public-tls-certs\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.287609 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-ovndb-tls-certs\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.287647 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-httpd-config\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.287663 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-config\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.287726 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-combined-ca-bundle\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.287799 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwpv5\" (UniqueName: \"kubernetes.io/projected/810a735e-844d-434e-aa3f-7ac5421d1303-kube-api-access-rwpv5\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.306969 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-public-tls-certs\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.307019 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-ovndb-tls-certs\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: 
I0201 07:42:26.309759 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-internal-tls-certs\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.316020 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-httpd-config\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.332312 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-combined-ca-bundle\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.339410 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/810a735e-844d-434e-aa3f-7ac5421d1303-config\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.353709 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwpv5\" (UniqueName: \"kubernetes.io/projected/810a735e-844d-434e-aa3f-7ac5421d1303-kube-api-access-rwpv5\") pod \"neutron-d5c446bd9-7rlx2\" (UID: \"810a735e-844d-434e-aa3f-7ac5421d1303\") " pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.371176 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.790601 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.914740 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3f97afc-40d4-4fc4-be00-1280202c0a31-logs\") pod \"f3f97afc-40d4-4fc4-be00-1280202c0a31\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.914878 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f3f97afc-40d4-4fc4-be00-1280202c0a31-config-data\") pod \"f3f97afc-40d4-4fc4-be00-1280202c0a31\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.915004 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f3f97afc-40d4-4fc4-be00-1280202c0a31-scripts\") pod \"f3f97afc-40d4-4fc4-be00-1280202c0a31\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.915043 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tnd7\" (UniqueName: \"kubernetes.io/projected/f3f97afc-40d4-4fc4-be00-1280202c0a31-kube-api-access-6tnd7\") pod \"f3f97afc-40d4-4fc4-be00-1280202c0a31\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.915094 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f3f97afc-40d4-4fc4-be00-1280202c0a31-horizon-secret-key\") pod \"f3f97afc-40d4-4fc4-be00-1280202c0a31\" (UID: \"f3f97afc-40d4-4fc4-be00-1280202c0a31\") " Feb 01 07:42:26 crc kubenswrapper[4650]: I0201 07:42:26.915651 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3f97afc-40d4-4fc4-be00-1280202c0a31-logs" (OuterVolumeSpecName: "logs") pod "f3f97afc-40d4-4fc4-be00-1280202c0a31" (UID: "f3f97afc-40d4-4fc4-be00-1280202c0a31"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.016780 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3f97afc-40d4-4fc4-be00-1280202c0a31-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.090044 4650 generic.go:334] "Generic (PLEG): container finished" podID="510ee4f3-4c57-44cf-8e18-32b303ace963" containerID="db63f7fc657cdce1d0345f72f407a2a1e5fef86444489a36a3e4677ad777016b" exitCode=0 Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.090104 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" event={"ID":"510ee4f3-4c57-44cf-8e18-32b303ace963","Type":"ContainerDied","Data":"db63f7fc657cdce1d0345f72f407a2a1e5fef86444489a36a3e4677ad777016b"} Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.130487 4650 generic.go:334] "Generic (PLEG): container finished" podID="f3f97afc-40d4-4fc4-be00-1280202c0a31" containerID="112391bf4fb6c72a11e97b41c4000f17d32e1ef908ec337d0696c0c7f970a9ce" exitCode=137 Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.130541 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-695d6f76c-qccxs" event={"ID":"f3f97afc-40d4-4fc4-be00-1280202c0a31","Type":"ContainerDied","Data":"112391bf4fb6c72a11e97b41c4000f17d32e1ef908ec337d0696c0c7f970a9ce"} Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.130565 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-695d6f76c-qccxs" event={"ID":"f3f97afc-40d4-4fc4-be00-1280202c0a31","Type":"ContainerDied","Data":"8f052fd8c8f557cd394003259687d65a2bfc7c8955f6c7876aecfcf9bfeb8dab"} Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.130581 4650 scope.go:117] "RemoveContainer" containerID="112391bf4fb6c72a11e97b41c4000f17d32e1ef908ec337d0696c0c7f970a9ce" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.130681 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-695d6f76c-qccxs" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.149696 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7467859e-a792-4959-bd51-d353099352bd","Type":"ContainerStarted","Data":"fac7156c95f6ebc86803bd6c5d91b653686f10c64f354fa4732551fb36c9755e"} Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.151049 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" event={"ID":"4f5171ae-f202-4f51-a05b-c13e7136959c","Type":"ContainerStarted","Data":"6426e85560160582cd934b0deca0ccfa532a219a19a0f534940752af16302637"} Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.153191 4650 generic.go:334] "Generic (PLEG): container finished" podID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerID="e8583e40abe46d8cdc8a77461093489f5ae34b53d356d113956d6178480920dc" exitCode=0 Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.153230 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b243d67e-b432-4b66-aa65-05cdbc100cb7","Type":"ContainerDied","Data":"e8583e40abe46d8cdc8a77461093489f5ae34b53d356d113956d6178480920dc"} Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.154698 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7dcc97964d-q5ws4" event={"ID":"60db3db0-16a4-4f77-bbe2-cd46c8b70039","Type":"ContainerStarted","Data":"510fdf5d04f92cafd279fe6b771f18c639d31c04c9fb53ddf8bf52d631fae1ef"} Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.154737 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7dcc97964d-q5ws4" event={"ID":"60db3db0-16a4-4f77-bbe2-cd46c8b70039","Type":"ContainerStarted","Data":"8a036b7a985593957422949ba28712f5ef297a0981f6fbe8d4cecf0c555da26f"} Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.155566 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.155586 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:27 crc kubenswrapper[4650]: E0201 07:42:27.496860 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.595578 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3f97afc-40d4-4fc4-be00-1280202c0a31-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "f3f97afc-40d4-4fc4-be00-1280202c0a31" (UID: "f3f97afc-40d4-4fc4-be00-1280202c0a31"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.600376 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3f97afc-40d4-4fc4-be00-1280202c0a31-config-data" (OuterVolumeSpecName: "config-data") pod "f3f97afc-40d4-4fc4-be00-1280202c0a31" (UID: "f3f97afc-40d4-4fc4-be00-1280202c0a31"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.602813 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3f97afc-40d4-4fc4-be00-1280202c0a31-scripts" (OuterVolumeSpecName: "scripts") pod "f3f97afc-40d4-4fc4-be00-1280202c0a31" (UID: "f3f97afc-40d4-4fc4-be00-1280202c0a31"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.630360 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f3f97afc-40d4-4fc4-be00-1280202c0a31-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.630390 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f3f97afc-40d4-4fc4-be00-1280202c0a31-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.630399 4650 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f3f97afc-40d4-4fc4-be00-1280202c0a31-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.642294 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3f97afc-40d4-4fc4-be00-1280202c0a31-kube-api-access-6tnd7" (OuterVolumeSpecName: "kube-api-access-6tnd7") pod "f3f97afc-40d4-4fc4-be00-1280202c0a31" (UID: "f3f97afc-40d4-4fc4-be00-1280202c0a31"). InnerVolumeSpecName "kube-api-access-6tnd7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.664537 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7dcc97964d-q5ws4" podStartSLOduration=4.664516415 podStartE2EDuration="4.664516415s" podCreationTimestamp="2026-02-01 07:42:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:42:27.655333176 +0000 UTC m=+1146.378431431" watchObservedRunningTime="2026-02-01 07:42:27.664516415 +0000 UTC m=+1146.387614660" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.757227 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tnd7\" (UniqueName: \"kubernetes.io/projected/f3f97afc-40d4-4fc4-be00-1280202c0a31-kube-api-access-6tnd7\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.828135 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-695d6f76c-qccxs"] Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.853996 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-695d6f76c-qccxs"] Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.973460 4650 scope.go:117] "RemoveContainer" containerID="b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.973529 4650 scope.go:117] "RemoveContainer" containerID="dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.973617 4650 scope.go:117] "RemoveContainer" containerID="0762ec2515f934e543828087282d638c3ace8afd252c27ec2209aca61ed63e83" Feb 01 07:42:27 crc kubenswrapper[4650]: E0201 07:42:27.974131 4650 pod_workers.go:1301] "Error syncing pod, 
skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:42:27 crc kubenswrapper[4650]: I0201 07:42:27.980299 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3f97afc-40d4-4fc4-be00-1280202c0a31" path="/var/lib/kubelet/pods/f3f97afc-40d4-4fc4-be00-1280202c0a31/volumes" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.189791 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7467859e-a792-4959-bd51-d353099352bd","Type":"ContainerStarted","Data":"c23a4e072c8b04bf5e19d74440d7967f46d9f4d8232816b798b219932cdd38dd"} Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.191344 4650 generic.go:334] "Generic (PLEG): container finished" podID="4f5171ae-f202-4f51-a05b-c13e7136959c" containerID="9b8fd444628a9918940b0e1c1430c239ee56c8ad25b17dc99bbf6bdeba93b974" exitCode=0 Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.191468 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.192162 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" event={"ID":"4f5171ae-f202-4f51-a05b-c13e7136959c","Type":"ContainerDied","Data":"9b8fd444628a9918940b0e1c1430c239ee56c8ad25b17dc99bbf6bdeba93b974"} Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.218970 4650 scope.go:117] "RemoveContainer" containerID="92e7d65631ca2cdf85d94e108c984e0abaad1a4071ab0be5c98f694619635f10" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.444259 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.486087 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-ovsdbserver-nb\") pod \"510ee4f3-4c57-44cf-8e18-32b303ace963\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.486144 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-dns-svc\") pod \"510ee4f3-4c57-44cf-8e18-32b303ace963\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.486171 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-ovsdbserver-sb\") pod \"510ee4f3-4c57-44cf-8e18-32b303ace963\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.486429 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ds8hh\" (UniqueName: \"kubernetes.io/projected/510ee4f3-4c57-44cf-8e18-32b303ace963-kube-api-access-ds8hh\") pod \"510ee4f3-4c57-44cf-8e18-32b303ace963\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.486547 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-config\") pod \"510ee4f3-4c57-44cf-8e18-32b303ace963\" (UID: \"510ee4f3-4c57-44cf-8e18-32b303ace963\") " Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.513907 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "510ee4f3-4c57-44cf-8e18-32b303ace963" (UID: "510ee4f3-4c57-44cf-8e18-32b303ace963"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.514177 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-config" (OuterVolumeSpecName: "config") pod "510ee4f3-4c57-44cf-8e18-32b303ace963" (UID: "510ee4f3-4c57-44cf-8e18-32b303ace963"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.513932 4650 scope.go:117] "RemoveContainer" containerID="112391bf4fb6c72a11e97b41c4000f17d32e1ef908ec337d0696c0c7f970a9ce" Feb 01 07:42:28 crc kubenswrapper[4650]: E0201 07:42:28.516903 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"112391bf4fb6c72a11e97b41c4000f17d32e1ef908ec337d0696c0c7f970a9ce\": container with ID starting with 112391bf4fb6c72a11e97b41c4000f17d32e1ef908ec337d0696c0c7f970a9ce not found: ID does not exist" containerID="112391bf4fb6c72a11e97b41c4000f17d32e1ef908ec337d0696c0c7f970a9ce" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.516974 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"112391bf4fb6c72a11e97b41c4000f17d32e1ef908ec337d0696c0c7f970a9ce"} err="failed to get container status \"112391bf4fb6c72a11e97b41c4000f17d32e1ef908ec337d0696c0c7f970a9ce\": rpc error: code = NotFound desc = could not find container \"112391bf4fb6c72a11e97b41c4000f17d32e1ef908ec337d0696c0c7f970a9ce\": container with ID starting with 112391bf4fb6c72a11e97b41c4000f17d32e1ef908ec337d0696c0c7f970a9ce not found: ID does not exist" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.517001 4650 scope.go:117] "RemoveContainer" containerID="92e7d65631ca2cdf85d94e108c984e0abaad1a4071ab0be5c98f694619635f10" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.520356 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:42:28 crc kubenswrapper[4650]: E0201 07:42:28.520806 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92e7d65631ca2cdf85d94e108c984e0abaad1a4071ab0be5c98f694619635f10\": container with ID starting with 92e7d65631ca2cdf85d94e108c984e0abaad1a4071ab0be5c98f694619635f10 not found: ID does not exist" containerID="92e7d65631ca2cdf85d94e108c984e0abaad1a4071ab0be5c98f694619635f10" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.520912 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92e7d65631ca2cdf85d94e108c984e0abaad1a4071ab0be5c98f694619635f10"} err="failed to get container status \"92e7d65631ca2cdf85d94e108c984e0abaad1a4071ab0be5c98f694619635f10\": rpc error: code = NotFound desc = could not find container \"92e7d65631ca2cdf85d94e108c984e0abaad1a4071ab0be5c98f694619635f10\": container with ID starting with 92e7d65631ca2cdf85d94e108c984e0abaad1a4071ab0be5c98f694619635f10 not found: ID does not exist" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.521670 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/510ee4f3-4c57-44cf-8e18-32b303ace963-kube-api-access-ds8hh" (OuterVolumeSpecName: "kube-api-access-ds8hh") pod "510ee4f3-4c57-44cf-8e18-32b303ace963" (UID: "510ee4f3-4c57-44cf-8e18-32b303ace963"). InnerVolumeSpecName "kube-api-access-ds8hh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.553823 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "510ee4f3-4c57-44cf-8e18-32b303ace963" (UID: "510ee4f3-4c57-44cf-8e18-32b303ace963"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.554328 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "510ee4f3-4c57-44cf-8e18-32b303ace963" (UID: "510ee4f3-4c57-44cf-8e18-32b303ace963"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.588585 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-d5c446bd9-7rlx2"] Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.589137 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-combined-ca-bundle\") pod \"b243d67e-b432-4b66-aa65-05cdbc100cb7\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.589189 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dvxp\" (UniqueName: \"kubernetes.io/projected/b243d67e-b432-4b66-aa65-05cdbc100cb7-kube-api-access-7dvxp\") pod \"b243d67e-b432-4b66-aa65-05cdbc100cb7\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.589243 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-sg-core-conf-yaml\") pod \"b243d67e-b432-4b66-aa65-05cdbc100cb7\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.589268 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b243d67e-b432-4b66-aa65-05cdbc100cb7-run-httpd\") pod \"b243d67e-b432-4b66-aa65-05cdbc100cb7\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.589422 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b243d67e-b432-4b66-aa65-05cdbc100cb7-log-httpd\") pod \"b243d67e-b432-4b66-aa65-05cdbc100cb7\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.589483 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-config-data\") pod \"b243d67e-b432-4b66-aa65-05cdbc100cb7\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.589505 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-scripts\") pod \"b243d67e-b432-4b66-aa65-05cdbc100cb7\" (UID: \"b243d67e-b432-4b66-aa65-05cdbc100cb7\") " Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.589618 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b243d67e-b432-4b66-aa65-05cdbc100cb7-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b243d67e-b432-4b66-aa65-05cdbc100cb7" (UID: "b243d67e-b432-4b66-aa65-05cdbc100cb7"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.589858 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b243d67e-b432-4b66-aa65-05cdbc100cb7-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b243d67e-b432-4b66-aa65-05cdbc100cb7" (UID: "b243d67e-b432-4b66-aa65-05cdbc100cb7"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.590239 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.590259 4650 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.590270 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.590279 4650 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b243d67e-b432-4b66-aa65-05cdbc100cb7-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.590287 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ds8hh\" (UniqueName: \"kubernetes.io/projected/510ee4f3-4c57-44cf-8e18-32b303ace963-kube-api-access-ds8hh\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.590297 4650 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b243d67e-b432-4b66-aa65-05cdbc100cb7-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.590305 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/510ee4f3-4c57-44cf-8e18-32b303ace963-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.595432 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-scripts" (OuterVolumeSpecName: "scripts") pod "b243d67e-b432-4b66-aa65-05cdbc100cb7" (UID: "b243d67e-b432-4b66-aa65-05cdbc100cb7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.607517 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b243d67e-b432-4b66-aa65-05cdbc100cb7-kube-api-access-7dvxp" (OuterVolumeSpecName: "kube-api-access-7dvxp") pod "b243d67e-b432-4b66-aa65-05cdbc100cb7" (UID: "b243d67e-b432-4b66-aa65-05cdbc100cb7"). InnerVolumeSpecName "kube-api-access-7dvxp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.693316 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dvxp\" (UniqueName: \"kubernetes.io/projected/b243d67e-b432-4b66-aa65-05cdbc100cb7-kube-api-access-7dvxp\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.693347 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:28 crc kubenswrapper[4650]: I0201 07:42:28.946269 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.034188 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b243d67e-b432-4b66-aa65-05cdbc100cb7" (UID: "b243d67e-b432-4b66-aa65-05cdbc100cb7"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.098426 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b243d67e-b432-4b66-aa65-05cdbc100cb7" (UID: "b243d67e-b432-4b66-aa65-05cdbc100cb7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.121371 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.121413 4650 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.128306 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-config-data" (OuterVolumeSpecName: "config-data") pod "b243d67e-b432-4b66-aa65-05cdbc100cb7" (UID: "b243d67e-b432-4b66-aa65-05cdbc100cb7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.213201 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b243d67e-b432-4b66-aa65-05cdbc100cb7","Type":"ContainerDied","Data":"b63abc3cf3d3d58eca7d271f11cbe4456a1e1d164b6eb75a5b49953f36e03ae8"} Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.213250 4650 scope.go:117] "RemoveContainer" containerID="ad42355a134d7477668342e74df4ce08c5c1535c48ba45ee58f5937d2ccbe086" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.213261 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.217077 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" event={"ID":"510ee4f3-4c57-44cf-8e18-32b303ace963","Type":"ContainerDied","Data":"0498fe6ffd00bec85569129879f0f60dcb5271bf75d74ef4846800ead9306f1d"} Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.217177 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-798d46d59c-f9x7p" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.226416 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b243d67e-b432-4b66-aa65-05cdbc100cb7-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.269350 4650 scope.go:117] "RemoveContainer" containerID="85595216f4289a1ec765699bd7fe57174cf0eed41e79b56b03aa606b0d164083" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.290586 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-d5c446bd9-7rlx2" event={"ID":"810a735e-844d-434e-aa3f-7ac5421d1303","Type":"ContainerStarted","Data":"4682de86a5fd7af39d3b86fede9892190c9928568f1f3bf2d23c507eedd397c8"} Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.311439 4650 scope.go:117] "RemoveContainer" containerID="e8583e40abe46d8cdc8a77461093489f5ae34b53d356d113956d6178480920dc" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.311583 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" event={"ID":"4f5171ae-f202-4f51-a05b-c13e7136959c","Type":"ContainerStarted","Data":"b590b8fe5f73e6c43905b4cc248783bff820334eca133a956c67355237676a1b"} Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.322501 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-798d46d59c-f9x7p"] Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.341286 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-798d46d59c-f9x7p"] Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.353946 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.379667 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.402231 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:42:29 crc kubenswrapper[4650]: E0201 07:42:29.402619 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="sg-core" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.402651 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="sg-core" Feb 01 07:42:29 crc kubenswrapper[4650]: E0201 07:42:29.402672 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="proxy-httpd" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.402680 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="proxy-httpd" Feb 01 07:42:29 crc kubenswrapper[4650]: E0201 07:42:29.402696 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" 
containerName="ceilometer-notification-agent" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.402705 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="ceilometer-notification-agent" Feb 01 07:42:29 crc kubenswrapper[4650]: E0201 07:42:29.402715 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3f97afc-40d4-4fc4-be00-1280202c0a31" containerName="horizon" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.402722 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3f97afc-40d4-4fc4-be00-1280202c0a31" containerName="horizon" Feb 01 07:42:29 crc kubenswrapper[4650]: E0201 07:42:29.402734 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="510ee4f3-4c57-44cf-8e18-32b303ace963" containerName="init" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.402741 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="510ee4f3-4c57-44cf-8e18-32b303ace963" containerName="init" Feb 01 07:42:29 crc kubenswrapper[4650]: E0201 07:42:29.402753 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="ceilometer-central-agent" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.402761 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="ceilometer-central-agent" Feb 01 07:42:29 crc kubenswrapper[4650]: E0201 07:42:29.402775 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3f97afc-40d4-4fc4-be00-1280202c0a31" containerName="horizon-log" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.402782 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3f97afc-40d4-4fc4-be00-1280202c0a31" containerName="horizon-log" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.403018 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3f97afc-40d4-4fc4-be00-1280202c0a31" containerName="horizon" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.403128 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="sg-core" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.403138 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="proxy-httpd" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.403169 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="ceilometer-central-agent" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.403182 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3f97afc-40d4-4fc4-be00-1280202c0a31" containerName="horizon-log" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.403195 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="510ee4f3-4c57-44cf-8e18-32b303ace963" containerName="init" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.403208 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" containerName="ceilometer-notification-agent" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.404692 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.410484 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" podStartSLOduration=5.410465599 podStartE2EDuration="5.410465599s" podCreationTimestamp="2026-02-01 07:42:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:42:29.370871357 +0000 UTC m=+1148.093969602" watchObservedRunningTime="2026-02-01 07:42:29.410465599 +0000 UTC m=+1148.133563844" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.415017 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.415291 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.428519 4650 scope.go:117] "RemoveContainer" containerID="c727b8fe9386e32e0f907f7dd32d6dc2f2f2b3f3323c8ca2015c7aa290fbab08" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.437175 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.537794 4650 scope.go:117] "RemoveContainer" containerID="db63f7fc657cdce1d0345f72f407a2a1e5fef86444489a36a3e4677ad777016b" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.541293 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzmml\" (UniqueName: \"kubernetes.io/projected/680e3982-832f-42e6-a5ba-6375217b266b-kube-api-access-nzmml\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.541371 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.541392 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.541437 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/680e3982-832f-42e6-a5ba-6375217b266b-run-httpd\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.541473 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-config-data\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.541490 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/680e3982-832f-42e6-a5ba-6375217b266b-log-httpd\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.541522 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-scripts\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.643715 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzmml\" (UniqueName: \"kubernetes.io/projected/680e3982-832f-42e6-a5ba-6375217b266b-kube-api-access-nzmml\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.644199 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.644297 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.644433 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/680e3982-832f-42e6-a5ba-6375217b266b-run-httpd\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.644552 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-config-data\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.644643 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/680e3982-832f-42e6-a5ba-6375217b266b-log-httpd\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.644777 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-scripts\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.645902 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/680e3982-832f-42e6-a5ba-6375217b266b-log-httpd\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.646100 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/680e3982-832f-42e6-a5ba-6375217b266b-run-httpd\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.650618 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-config-data\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.650641 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.652099 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-scripts\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.652407 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.678537 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzmml\" (UniqueName: \"kubernetes.io/projected/680e3982-832f-42e6-a5ba-6375217b266b-kube-api-access-nzmml\") pod \"ceilometer-0\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.769504 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.976518 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="510ee4f3-4c57-44cf-8e18-32b303ace963" path="/var/lib/kubelet/pods/510ee4f3-4c57-44cf-8e18-32b303ace963/volumes" Feb 01 07:42:29 crc kubenswrapper[4650]: I0201 07:42:29.978060 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b243d67e-b432-4b66-aa65-05cdbc100cb7" path="/var/lib/kubelet/pods/b243d67e-b432-4b66-aa65-05cdbc100cb7/volumes" Feb 01 07:42:30 crc kubenswrapper[4650]: I0201 07:42:30.123012 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:30 crc kubenswrapper[4650]: I0201 07:42:30.330245 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-d5c446bd9-7rlx2" event={"ID":"810a735e-844d-434e-aa3f-7ac5421d1303","Type":"ContainerStarted","Data":"c133e3a0e3a200f49c71bf1ad74c5ded0edc44d67fa7adec5fc45f4b55120ac9"} Feb 01 07:42:30 crc kubenswrapper[4650]: I0201 07:42:30.337660 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3","Type":"ContainerStarted","Data":"47ee45d492ddf99c212b749e2a1492cec63dae86cdc46928cae0570d7d2ae3a6"} Feb 01 07:42:30 crc kubenswrapper[4650]: I0201 07:42:30.346932 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-85ff8d5c86-hrgmh_d8cab23a-57a2-432e-9aa8-1ffc44434d58/neutron-httpd/2.log" Feb 01 07:42:30 crc kubenswrapper[4650]: I0201 07:42:30.347377 4650 generic.go:334] "Generic (PLEG): container finished" podID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerID="d3b2ef07e4b18ed02881615aefe34055c60d3300d0846ea348409e2dfae6c9ae" exitCode=0 Feb 01 07:42:30 crc kubenswrapper[4650]: I0201 07:42:30.347443 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85ff8d5c86-hrgmh" event={"ID":"d8cab23a-57a2-432e-9aa8-1ffc44434d58","Type":"ContainerDied","Data":"d3b2ef07e4b18ed02881615aefe34055c60d3300d0846ea348409e2dfae6c9ae"} Feb 01 07:42:30 crc kubenswrapper[4650]: I0201 07:42:30.364892 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="7467859e-a792-4959-bd51-d353099352bd" containerName="cinder-api-log" containerID="cri-o://c23a4e072c8b04bf5e19d74440d7967f46d9f4d8232816b798b219932cdd38dd" gracePeriod=30 Feb 01 07:42:30 crc kubenswrapper[4650]: I0201 07:42:30.365175 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7467859e-a792-4959-bd51-d353099352bd","Type":"ContainerStarted","Data":"37cd4406ada2c9a6fa17ddf90e372c2026b5e351b6825103b029e49fa4c52eaf"} Feb 01 07:42:30 crc kubenswrapper[4650]: I0201 07:42:30.365213 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Feb 01 07:42:30 crc kubenswrapper[4650]: I0201 07:42:30.365434 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="7467859e-a792-4959-bd51-d353099352bd" containerName="cinder-api" containerID="cri-o://37cd4406ada2c9a6fa17ddf90e372c2026b5e351b6825103b029e49fa4c52eaf" gracePeriod=30 Feb 01 07:42:30 crc kubenswrapper[4650]: I0201 07:42:30.404655 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.404634162 podStartE2EDuration="6.404634162s" podCreationTimestamp="2026-02-01 07:42:24 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:42:30.393160852 +0000 UTC m=+1149.116259107" watchObservedRunningTime="2026-02-01 07:42:30.404634162 +0000 UTC m=+1149.127732407" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.259109 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7c95c4fc5d-sj2l8"] Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.262175 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.264098 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.265470 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.293571 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7c95c4fc5d-sj2l8"] Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.397091 4650 generic.go:334] "Generic (PLEG): container finished" podID="7467859e-a792-4959-bd51-d353099352bd" containerID="c23a4e072c8b04bf5e19d74440d7967f46d9f4d8232816b798b219932cdd38dd" exitCode=143 Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.397155 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7467859e-a792-4959-bd51-d353099352bd","Type":"ContainerDied","Data":"c23a4e072c8b04bf5e19d74440d7967f46d9f4d8232816b798b219932cdd38dd"} Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.401107 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-d5c446bd9-7rlx2" event={"ID":"810a735e-844d-434e-aa3f-7ac5421d1303","Type":"ContainerStarted","Data":"e96fb6eb7135e29aa0f5e09938e4ab315764d99b2be2950a3687d24233b8821c"} Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.401218 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.408545 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-config-data-custom\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.408590 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-config-data\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.408655 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1757f711-f748-4782-8075-cc9ae3b3275c-logs\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.408690 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-combined-ca-bundle\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.408800 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-internal-tls-certs\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.408884 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-public-tls-certs\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.408905 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wxs4\" (UniqueName: \"kubernetes.io/projected/1757f711-f748-4782-8075-cc9ae3b3275c-kube-api-access-9wxs4\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.434782 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-d5c446bd9-7rlx2" podStartSLOduration=6.434762931 podStartE2EDuration="6.434762931s" podCreationTimestamp="2026-02-01 07:42:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:42:31.420153 +0000 UTC m=+1150.143251255" watchObservedRunningTime="2026-02-01 07:42:31.434762931 +0000 UTC m=+1150.157861176" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.510609 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-internal-tls-certs\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.510667 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-public-tls-certs\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.510686 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wxs4\" (UniqueName: \"kubernetes.io/projected/1757f711-f748-4782-8075-cc9ae3b3275c-kube-api-access-9wxs4\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.510749 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-config-data-custom\") pod 
\"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.510769 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-config-data\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.510852 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1757f711-f748-4782-8075-cc9ae3b3275c-logs\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.510897 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-combined-ca-bundle\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.511929 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1757f711-f748-4782-8075-cc9ae3b3275c-logs\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.520669 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-config-data-custom\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.521395 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-public-tls-certs\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.522856 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-combined-ca-bundle\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.524501 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-internal-tls-certs\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.537963 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1757f711-f748-4782-8075-cc9ae3b3275c-config-data\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " 
pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.537987 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wxs4\" (UniqueName: \"kubernetes.io/projected/1757f711-f748-4782-8075-cc9ae3b3275c-kube-api-access-9wxs4\") pod \"barbican-api-7c95c4fc5d-sj2l8\" (UID: \"1757f711-f748-4782-8075-cc9ae3b3275c\") " pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.578254 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.760228 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-85ff8d5c86-hrgmh_d8cab23a-57a2-432e-9aa8-1ffc44434d58/neutron-httpd/2.log" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.763936 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.816008 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-ovndb-tls-certs\") pod \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.816061 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-config\") pod \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.816088 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwjdk\" (UniqueName: \"kubernetes.io/projected/d8cab23a-57a2-432e-9aa8-1ffc44434d58-kube-api-access-bwjdk\") pod \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.816144 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-httpd-config\") pod \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.816229 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-combined-ca-bundle\") pod \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\" (UID: \"d8cab23a-57a2-432e-9aa8-1ffc44434d58\") " Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.820928 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8cab23a-57a2-432e-9aa8-1ffc44434d58-kube-api-access-bwjdk" (OuterVolumeSpecName: "kube-api-access-bwjdk") pod "d8cab23a-57a2-432e-9aa8-1ffc44434d58" (UID: "d8cab23a-57a2-432e-9aa8-1ffc44434d58"). InnerVolumeSpecName "kube-api-access-bwjdk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.838500 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "d8cab23a-57a2-432e-9aa8-1ffc44434d58" (UID: "d8cab23a-57a2-432e-9aa8-1ffc44434d58"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.922606 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwjdk\" (UniqueName: \"kubernetes.io/projected/d8cab23a-57a2-432e-9aa8-1ffc44434d58-kube-api-access-bwjdk\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.922629 4650 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-httpd-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.937784 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d8cab23a-57a2-432e-9aa8-1ffc44434d58" (UID: "d8cab23a-57a2-432e-9aa8-1ffc44434d58"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.981780 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-config" (OuterVolumeSpecName: "config") pod "d8cab23a-57a2-432e-9aa8-1ffc44434d58" (UID: "d8cab23a-57a2-432e-9aa8-1ffc44434d58"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:31 crc kubenswrapper[4650]: I0201 07:42:31.986444 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "d8cab23a-57a2-432e-9aa8-1ffc44434d58" (UID: "d8cab23a-57a2-432e-9aa8-1ffc44434d58"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.026308 4650 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.026357 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.026368 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8cab23a-57a2-432e-9aa8-1ffc44434d58-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.284004 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.323895 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7c95c4fc5d-sj2l8"] Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.413169 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-8466dd5d47-jv8ww" event={"ID":"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa","Type":"ContainerStarted","Data":"f456c8d7a334f968b4c4f8f6ccb891f2c2a7ad46a2d109dd71947bb58acb947d"} Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.413209 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-8466dd5d47-jv8ww" event={"ID":"8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa","Type":"ContainerStarted","Data":"bfaca294500aa87fffcf0f8c09faeb3a7e90d8adfa9e85f1586cad7f9fd0fe43"} Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.415147 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"680e3982-832f-42e6-a5ba-6375217b266b","Type":"ContainerStarted","Data":"847085f4244c4ef8a38d21469cfc07b302736e4f3ef5f4604f6f2c6386ad5c44"} Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.418071 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" event={"ID":"4ac48219-ace9-4dec-a04a-c710e730a1d4","Type":"ContainerStarted","Data":"b60c740ad9cf50181dc6dd3711f50d236bf51231fa2f25bc5c2a16a3520674dc"} Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.419124 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c95c4fc5d-sj2l8" event={"ID":"1757f711-f748-4782-8075-cc9ae3b3275c","Type":"ContainerStarted","Data":"c8eaedcadd5b854fb2c645a45de5f3b37f935d6c6b595528363fe10e33c82a42"} Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.420226 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-85ff8d5c86-hrgmh_d8cab23a-57a2-432e-9aa8-1ffc44434d58/neutron-httpd/2.log" Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.421013 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-85ff8d5c86-hrgmh" Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.421430 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85ff8d5c86-hrgmh" event={"ID":"d8cab23a-57a2-432e-9aa8-1ffc44434d58","Type":"ContainerDied","Data":"eab2f92a1e1098a97d366301d4866d1822f89b35f7a7301a8d37606efcc8d925"} Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.421454 4650 scope.go:117] "RemoveContainer" containerID="4eeac89e42f7d84ac549f5e4414dbc7ad5341ac21121b624c78eb8dcc1fba363" Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.438909 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-8466dd5d47-jv8ww" podStartSLOduration=2.792178625 podStartE2EDuration="9.438892042s" podCreationTimestamp="2026-02-01 07:42:23 +0000 UTC" firstStartedPulling="2026-02-01 07:42:25.088776885 +0000 UTC m=+1143.811875130" lastFinishedPulling="2026-02-01 07:42:31.735490302 +0000 UTC m=+1150.458588547" observedRunningTime="2026-02-01 07:42:32.432313341 +0000 UTC m=+1151.155411586" watchObservedRunningTime="2026-02-01 07:42:32.438892042 +0000 UTC m=+1151.161990297" Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.465824 4650 scope.go:117] "RemoveContainer" containerID="d3b2ef07e4b18ed02881615aefe34055c60d3300d0846ea348409e2dfae6c9ae" Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.468552 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-85ff8d5c86-hrgmh"] Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.487749 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-85ff8d5c86-hrgmh"] Feb 01 07:42:32 crc kubenswrapper[4650]: I0201 07:42:32.548140 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:42:32 crc kubenswrapper[4650]: E0201 07:42:32.549105 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:42:32 crc kubenswrapper[4650]: E0201 07:42:32.549184 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:44:34.549166558 +0000 UTC m=+1273.272264803 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:42:33 crc kubenswrapper[4650]: I0201 07:42:33.451031 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"680e3982-832f-42e6-a5ba-6375217b266b","Type":"ContainerStarted","Data":"c2f795c6639bb341610be8515b54993b33c481acbb80cec36b066ad8389f452f"} Feb 01 07:42:33 crc kubenswrapper[4650]: I0201 07:42:33.453802 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" event={"ID":"4ac48219-ace9-4dec-a04a-c710e730a1d4","Type":"ContainerStarted","Data":"d28ad9bd96e09e0c3c30a58ce8a5aa955fabb63964c6cd29563d33fd812f9214"} Feb 01 07:42:33 crc kubenswrapper[4650]: I0201 07:42:33.456805 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3","Type":"ContainerStarted","Data":"d2104cef9159458fef09de62d504b58ef50115e5fbb922b36cffb24d75b6fa53"} Feb 01 07:42:33 crc kubenswrapper[4650]: I0201 07:42:33.460361 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c95c4fc5d-sj2l8" event={"ID":"1757f711-f748-4782-8075-cc9ae3b3275c","Type":"ContainerStarted","Data":"e6f4e441eafcda6c447c37a8d0f9698c796190c8425feed9020972c397e474ff"} Feb 01 07:42:33 crc kubenswrapper[4650]: I0201 07:42:33.460390 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c95c4fc5d-sj2l8" event={"ID":"1757f711-f748-4782-8075-cc9ae3b3275c","Type":"ContainerStarted","Data":"46f690721ea3e8974647d315a47607bd01c3ba056fd5fea06fc80327f8a8fbae"} Feb 01 07:42:33 crc kubenswrapper[4650]: I0201 07:42:33.460404 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:33 crc kubenswrapper[4650]: I0201 07:42:33.460807 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:33 crc kubenswrapper[4650]: I0201 07:42:33.483464 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-595fb9c59d-bnj8v" podStartSLOduration=4.10551405 podStartE2EDuration="10.483445868s" podCreationTimestamp="2026-02-01 07:42:23 +0000 UTC" firstStartedPulling="2026-02-01 07:42:25.357889642 +0000 UTC m=+1144.080987887" lastFinishedPulling="2026-02-01 07:42:31.73582146 +0000 UTC m=+1150.458919705" observedRunningTime="2026-02-01 07:42:33.478097609 +0000 UTC m=+1152.201195864" watchObservedRunningTime="2026-02-01 07:42:33.483445868 +0000 UTC m=+1152.206544113" Feb 01 07:42:33 crc kubenswrapper[4650]: I0201 07:42:33.505825 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=6.894840554 podStartE2EDuration="9.505807152s" podCreationTimestamp="2026-02-01 07:42:24 +0000 UTC" firstStartedPulling="2026-02-01 07:42:25.695348971 +0000 UTC m=+1144.418447216" lastFinishedPulling="2026-02-01 07:42:28.306315569 +0000 UTC m=+1147.029413814" observedRunningTime="2026-02-01 07:42:33.503268085 +0000 UTC m=+1152.226366340" watchObservedRunningTime="2026-02-01 07:42:33.505807152 +0000 UTC m=+1152.228905417" Feb 01 07:42:33 crc kubenswrapper[4650]: I0201 07:42:33.546419 4650 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7c95c4fc5d-sj2l8" podStartSLOduration=2.54639641 podStartE2EDuration="2.54639641s" podCreationTimestamp="2026-02-01 07:42:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:42:33.53873082 +0000 UTC m=+1152.261829085" watchObservedRunningTime="2026-02-01 07:42:33.54639641 +0000 UTC m=+1152.269494665" Feb 01 07:42:33 crc kubenswrapper[4650]: I0201 07:42:33.975184 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" path="/var/lib/kubelet/pods/d8cab23a-57a2-432e-9aa8-1ffc44434d58/volumes" Feb 01 07:42:34 crc kubenswrapper[4650]: I0201 07:42:34.470227 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"680e3982-832f-42e6-a5ba-6375217b266b","Type":"ContainerStarted","Data":"3ba13e512980dd5e72e8be2a70e0017c9a01c0d583ca8ce52daaeceb106e2fde"} Feb 01 07:42:34 crc kubenswrapper[4650]: I0201 07:42:34.788136 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Feb 01 07:42:35 crc kubenswrapper[4650]: I0201 07:42:35.124224 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:42:35 crc kubenswrapper[4650]: I0201 07:42:35.206652 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b6c948c7-clpjc"] Feb 01 07:42:35 crc kubenswrapper[4650]: I0201 07:42:35.206885 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b6c948c7-clpjc" podUID="1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" containerName="dnsmasq-dns" containerID="cri-o://2e28eb2efeefda2c6ebc232204aea9cb0de3fc1044bde8103db5ef199d1920c4" gracePeriod=10 Feb 01 07:42:35 crc kubenswrapper[4650]: I0201 07:42:35.484911 4650 generic.go:334] "Generic (PLEG): container finished" podID="1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" containerID="2e28eb2efeefda2c6ebc232204aea9cb0de3fc1044bde8103db5ef199d1920c4" exitCode=0 Feb 01 07:42:35 crc kubenswrapper[4650]: I0201 07:42:35.484992 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b6c948c7-clpjc" event={"ID":"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94","Type":"ContainerDied","Data":"2e28eb2efeefda2c6ebc232204aea9cb0de3fc1044bde8103db5ef199d1920c4"} Feb 01 07:42:35 crc kubenswrapper[4650]: I0201 07:42:35.490365 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"680e3982-832f-42e6-a5ba-6375217b266b","Type":"ContainerStarted","Data":"8b99acef35160501425f3b881e05342a9a1133c6f987c4265efefab0e6d42f59"} Feb 01 07:42:35 crc kubenswrapper[4650]: I0201 07:42:35.848173 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:42:35 crc kubenswrapper[4650]: I0201 07:42:35.936760 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-ovsdbserver-nb\") pod \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " Feb 01 07:42:35 crc kubenswrapper[4650]: I0201 07:42:35.937217 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t84nv\" (UniqueName: \"kubernetes.io/projected/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-kube-api-access-t84nv\") pod \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " Feb 01 07:42:35 crc kubenswrapper[4650]: I0201 07:42:35.937250 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-ovsdbserver-sb\") pod \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " Feb 01 07:42:35 crc kubenswrapper[4650]: I0201 07:42:35.937274 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-config\") pod \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " Feb 01 07:42:35 crc kubenswrapper[4650]: I0201 07:42:35.937310 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-dns-svc\") pod \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\" (UID: \"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94\") " Feb 01 07:42:35 crc kubenswrapper[4650]: I0201 07:42:35.996436 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-kube-api-access-t84nv" (OuterVolumeSpecName: "kube-api-access-t84nv") pod "1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" (UID: "1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94"). InnerVolumeSpecName "kube-api-access-t84nv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.045357 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-config" (OuterVolumeSpecName: "config") pod "1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" (UID: "1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.046376 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" (UID: "1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.047339 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t84nv\" (UniqueName: \"kubernetes.io/projected/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-kube-api-access-t84nv\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.047371 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.047379 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.123757 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" (UID: "1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.124961 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" (UID: "1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.153147 4650 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.153371 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.500068 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b6c948c7-clpjc" event={"ID":"1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94","Type":"ContainerDied","Data":"aaa4b5b0660cbc9a9b21e6e02c19f49b54bc99b51f3ba2c146d4b31b70bb6040"} Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.501230 4650 scope.go:117] "RemoveContainer" containerID="2e28eb2efeefda2c6ebc232204aea9cb0de3fc1044bde8103db5ef199d1920c4" Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.500117 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b6c948c7-clpjc" Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.541362 4650 scope.go:117] "RemoveContainer" containerID="07ad40af5d9300bc4229c0e95af4124aa8bd3c4fa7b046b90f5e8e213bf8f783" Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.548140 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b6c948c7-clpjc"] Feb 01 07:42:36 crc kubenswrapper[4650]: I0201 07:42:36.571505 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b6c948c7-clpjc"] Feb 01 07:42:37 crc kubenswrapper[4650]: I0201 07:42:37.162499 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:42:37 crc kubenswrapper[4650]: I0201 07:42:37.162559 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:42:37 crc kubenswrapper[4650]: I0201 07:42:37.162602 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:42:37 crc kubenswrapper[4650]: I0201 07:42:37.163356 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"70e34c59087428be1d52cbbc9d3e74901ae2b55868cca05d2ac2b1cb47ec233d"} pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 01 07:42:37 crc kubenswrapper[4650]: I0201 07:42:37.163424 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" containerID="cri-o://70e34c59087428be1d52cbbc9d3e74901ae2b55868cca05d2ac2b1cb47ec233d" gracePeriod=600 Feb 01 07:42:37 crc kubenswrapper[4650]: I0201 07:42:37.510205 4650 generic.go:334] "Generic (PLEG): container finished" podID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerID="70e34c59087428be1d52cbbc9d3e74901ae2b55868cca05d2ac2b1cb47ec233d" exitCode=0 Feb 01 07:42:37 crc kubenswrapper[4650]: I0201 07:42:37.510293 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerDied","Data":"70e34c59087428be1d52cbbc9d3e74901ae2b55868cca05d2ac2b1cb47ec233d"} Feb 01 07:42:37 crc kubenswrapper[4650]: I0201 07:42:37.510528 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"da08a9d98a15a08dc02cc770b99ef74f8ab41ac5f98a7b2acee0e642f45cbee1"} Feb 01 07:42:37 crc kubenswrapper[4650]: I0201 07:42:37.510555 4650 scope.go:117] "RemoveContainer" containerID="a4559927c25b5172e0bb51589b156030237e4552bdad01ea0a510262dabc0be0" Feb 01 07:42:37 crc kubenswrapper[4650]: I0201 07:42:37.531338 4650 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"680e3982-832f-42e6-a5ba-6375217b266b","Type":"ContainerStarted","Data":"29d196d29da2073b28128c7fde254db724160d7237c6e660290314f8d4b3f3f7"} Feb 01 07:42:37 crc kubenswrapper[4650]: I0201 07:42:37.533281 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 01 07:42:37 crc kubenswrapper[4650]: I0201 07:42:37.596174 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.809120467 podStartE2EDuration="8.596155683s" podCreationTimestamp="2026-02-01 07:42:29 +0000 UTC" firstStartedPulling="2026-02-01 07:42:32.34751018 +0000 UTC m=+1151.070608425" lastFinishedPulling="2026-02-01 07:42:37.134545396 +0000 UTC m=+1155.857643641" observedRunningTime="2026-02-01 07:42:37.571269004 +0000 UTC m=+1156.294367249" watchObservedRunningTime="2026-02-01 07:42:37.596155683 +0000 UTC m=+1156.319253928" Feb 01 07:42:37 crc kubenswrapper[4650]: I0201 07:42:37.973888 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" path="/var/lib/kubelet/pods/1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94/volumes" Feb 01 07:42:38 crc kubenswrapper[4650]: I0201 07:42:38.452234 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-7dcc97964d-q5ws4" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.163:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:42:38 crc kubenswrapper[4650]: I0201 07:42:38.452241 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-7dcc97964d-q5ws4" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.163:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:42:38 crc kubenswrapper[4650]: I0201 07:42:38.797437 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:38 crc kubenswrapper[4650]: I0201 07:42:38.812314 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:38 crc kubenswrapper[4650]: I0201 07:42:38.824678 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:42:38 crc kubenswrapper[4650]: I0201 07:42:38.964896 4650 scope.go:117] "RemoveContainer" containerID="b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492" Feb 01 07:42:38 crc kubenswrapper[4650]: I0201 07:42:38.965185 4650 scope.go:117] "RemoveContainer" containerID="dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31" Feb 01 07:42:38 crc kubenswrapper[4650]: I0201 07:42:38.965269 4650 scope.go:117] "RemoveContainer" containerID="0762ec2515f934e543828087282d638c3ace8afd252c27ec2209aca61ed63e83" Feb 01 07:42:38 crc kubenswrapper[4650]: E0201 07:42:38.965514 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed 
container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.027216 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5b4d45c6bd-qsdbt" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.150:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.027303 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.028080 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"038fc80dfb9fd47b73607b6e75c77545e7d8c10ea25cbba2f578bdb2c48b96af"} pod="openstack/horizon-5b4d45c6bd-qsdbt" containerMessage="Container horizon failed startup probe, will be restarted" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.028112 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5b4d45c6bd-qsdbt" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" containerID="cri-o://038fc80dfb9fd47b73607b6e75c77545e7d8c10ea25cbba2f578bdb2c48b96af" gracePeriod=30 Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.144252 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-79fd8b5f84-qg9cv" podUID="9c4bad14-279f-4212-a86d-cea1c9fe7b48" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.144339 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.145092 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"e3cebc6d6781572d06c66f70f152f9956e68027b5900448693f2f7b809a6fd77"} pod="openstack/horizon-79fd8b5f84-qg9cv" containerMessage="Container horizon failed startup probe, will be restarted" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.145131 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-79fd8b5f84-qg9cv" podUID="9c4bad14-279f-4212-a86d-cea1c9fe7b48" containerName="horizon" containerID="cri-o://e3cebc6d6781572d06c66f70f152f9956e68027b5900448693f2f7b809a6fd77" gracePeriod=30 Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.190494 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-8d56c6c66-9jtxc"] Feb 01 07:42:39 crc kubenswrapper[4650]: E0201 07:42:39.191208 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerName="neutron-httpd" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.191317 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" 
containerName="neutron-httpd" Feb 01 07:42:39 crc kubenswrapper[4650]: E0201 07:42:39.191396 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" containerName="init" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.191474 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" containerName="init" Feb 01 07:42:39 crc kubenswrapper[4650]: E0201 07:42:39.191558 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerName="neutron-api" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.191646 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerName="neutron-api" Feb 01 07:42:39 crc kubenswrapper[4650]: E0201 07:42:39.191721 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerName="neutron-httpd" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.191790 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerName="neutron-httpd" Feb 01 07:42:39 crc kubenswrapper[4650]: E0201 07:42:39.191870 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" containerName="dnsmasq-dns" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.191947 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" containerName="dnsmasq-dns" Feb 01 07:42:39 crc kubenswrapper[4650]: E0201 07:42:39.192059 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerName="neutron-httpd" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.192143 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerName="neutron-httpd" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.192439 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerName="neutron-httpd" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.192550 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerName="neutron-api" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.192664 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c37fd9c-cf2e-47b5-b38e-e1a7c8c4cd94" containerName="dnsmasq-dns" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.192744 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerName="neutron-httpd" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.193177 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8cab23a-57a2-432e-9aa8-1ffc44434d58" containerName="neutron-httpd" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.193796 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.227734 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-8d56c6c66-9jtxc"] Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.323853 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-public-tls-certs\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.323929 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-config-data\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.323945 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-internal-tls-certs\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.323992 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-945vt\" (UniqueName: \"kubernetes.io/projected/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-kube-api-access-945vt\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.324014 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-scripts\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.324047 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-combined-ca-bundle\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.324138 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-logs\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.411234 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7dcc97964d-q5ws4" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.163:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.426106 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-combined-ca-bundle\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.427035 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-logs\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.427106 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-public-tls-certs\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.427163 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-config-data\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.427184 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-internal-tls-certs\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.427227 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-945vt\" (UniqueName: \"kubernetes.io/projected/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-kube-api-access-945vt\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.427249 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-scripts\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.428046 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-logs\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.436232 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-internal-tls-certs\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.438734 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-config-data\") pod \"placement-8d56c6c66-9jtxc\" (UID: 
\"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.443601 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-combined-ca-bundle\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.443887 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-scripts\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.443935 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-public-tls-certs\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.449959 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-945vt\" (UniqueName: \"kubernetes.io/projected/9d6f0da1-e84c-4917-8ec7-6fb5fb34a949-kube-api-access-945vt\") pod \"placement-8d56c6c66-9jtxc\" (UID: \"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949\") " pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:39 crc kubenswrapper[4650]: I0201 07:42:39.523395 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:40 crc kubenswrapper[4650]: I0201 07:42:40.133662 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-8d56c6c66-9jtxc"] Feb 01 07:42:40 crc kubenswrapper[4650]: I0201 07:42:40.304274 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="7467859e-a792-4959-bd51-d353099352bd" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.166:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:42:40 crc kubenswrapper[4650]: I0201 07:42:40.344065 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Feb 01 07:42:40 crc kubenswrapper[4650]: I0201 07:42:40.381864 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 01 07:42:40 crc kubenswrapper[4650]: I0201 07:42:40.580549 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" containerName="cinder-scheduler" containerID="cri-o://47ee45d492ddf99c212b749e2a1492cec63dae86cdc46928cae0570d7d2ae3a6" gracePeriod=30 Feb 01 07:42:40 crc kubenswrapper[4650]: I0201 07:42:40.580787 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8d56c6c66-9jtxc" event={"ID":"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949","Type":"ContainerStarted","Data":"b2d6de46c4c525010090b3789c7990de75eaeb64caa684c8556cd1771bd6d35e"} Feb 01 07:42:40 crc kubenswrapper[4650]: I0201 07:42:40.580818 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8d56c6c66-9jtxc" 
event={"ID":"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949","Type":"ContainerStarted","Data":"c43687b8e6573bfb59cd7d19b4215d6bfb376274356569336a001c5e4c305c84"} Feb 01 07:42:40 crc kubenswrapper[4650]: I0201 07:42:40.581051 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" containerName="probe" containerID="cri-o://d2104cef9159458fef09de62d504b58ef50115e5fbb922b36cffb24d75b6fa53" gracePeriod=30 Feb 01 07:42:41 crc kubenswrapper[4650]: I0201 07:42:41.596316 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8d56c6c66-9jtxc" event={"ID":"9d6f0da1-e84c-4917-8ec7-6fb5fb34a949","Type":"ContainerStarted","Data":"0ce3a850cc7db3be0ae158b6bc4e64c5f9e7431692cfdb12d58de05f0ccca57c"} Feb 01 07:42:41 crc kubenswrapper[4650]: I0201 07:42:41.597810 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:41 crc kubenswrapper[4650]: I0201 07:42:41.597908 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:42:41 crc kubenswrapper[4650]: I0201 07:42:41.620214 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-8d56c6c66-9jtxc" podStartSLOduration=2.620199845 podStartE2EDuration="2.620199845s" podCreationTimestamp="2026-02-01 07:42:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:42:41.618211054 +0000 UTC m=+1160.341309299" watchObservedRunningTime="2026-02-01 07:42:41.620199845 +0000 UTC m=+1160.343298090" Feb 01 07:42:42 crc kubenswrapper[4650]: I0201 07:42:42.606469 4650 generic.go:334] "Generic (PLEG): container finished" podID="c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" containerID="d2104cef9159458fef09de62d504b58ef50115e5fbb922b36cffb24d75b6fa53" exitCode=0 Feb 01 07:42:42 crc kubenswrapper[4650]: I0201 07:42:42.606726 4650 generic.go:334] "Generic (PLEG): container finished" podID="c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" containerID="47ee45d492ddf99c212b749e2a1492cec63dae86cdc46928cae0570d7d2ae3a6" exitCode=0 Feb 01 07:42:42 crc kubenswrapper[4650]: I0201 07:42:42.607560 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3","Type":"ContainerDied","Data":"d2104cef9159458fef09de62d504b58ef50115e5fbb922b36cffb24d75b6fa53"} Feb 01 07:42:42 crc kubenswrapper[4650]: I0201 07:42:42.607611 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3","Type":"ContainerDied","Data":"47ee45d492ddf99c212b749e2a1492cec63dae86cdc46928cae0570d7d2ae3a6"} Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.045995 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.113815 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-combined-ca-bundle\") pod \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.113859 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-scripts\") pod \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.113928 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-etc-machine-id\") pod \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.113966 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftrfv\" (UniqueName: \"kubernetes.io/projected/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-kube-api-access-ftrfv\") pod \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.114079 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-config-data-custom\") pod \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.114102 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-config-data\") pod \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\" (UID: \"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3\") " Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.123273 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" (UID: "c5c3b2e9-c11d-4af9-84d7-49384c4e39d3"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.134131 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-scripts" (OuterVolumeSpecName: "scripts") pod "c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" (UID: "c5c3b2e9-c11d-4af9-84d7-49384c4e39d3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.139191 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-kube-api-access-ftrfv" (OuterVolumeSpecName: "kube-api-access-ftrfv") pod "c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" (UID: "c5c3b2e9-c11d-4af9-84d7-49384c4e39d3"). InnerVolumeSpecName "kube-api-access-ftrfv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.174231 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" (UID: "c5c3b2e9-c11d-4af9-84d7-49384c4e39d3"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.247739 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.247775 4650 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.247787 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftrfv\" (UniqueName: \"kubernetes.io/projected/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-kube-api-access-ftrfv\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.247799 4650 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.304877 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" (UID: "c5c3b2e9-c11d-4af9-84d7-49384c4e39d3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.344142 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-config-data" (OuterVolumeSpecName: "config-data") pod "c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" (UID: "c5c3b2e9-c11d-4af9-84d7-49384c4e39d3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.349061 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.349085 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.494357 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-7dcc97964d-q5ws4" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.163:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.616798 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c5c3b2e9-c11d-4af9-84d7-49384c4e39d3","Type":"ContainerDied","Data":"7b01d43dc2ce165685eecdd7b86eab64d6e3bdf4f691306069955c9e5d2d6e81"} Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.616851 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.618045 4650 scope.go:117] "RemoveContainer" containerID="d2104cef9159458fef09de62d504b58ef50115e5fbb922b36cffb24d75b6fa53" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.643291 4650 scope.go:117] "RemoveContainer" containerID="47ee45d492ddf99c212b749e2a1492cec63dae86cdc46928cae0570d7d2ae3a6" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.669684 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.678578 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.702337 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Feb 01 07:42:43 crc kubenswrapper[4650]: E0201 07:42:43.702769 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" containerName="cinder-scheduler" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.702784 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" containerName="cinder-scheduler" Feb 01 07:42:43 crc kubenswrapper[4650]: E0201 07:42:43.702834 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" containerName="probe" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.702841 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" containerName="probe" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.703004 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" containerName="probe" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.703016 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" containerName="cinder-scheduler" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.704155 4650 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.708332 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.735389 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.857940 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/198da077-645a-4341-9c07-6860b5ce0a0d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.857979 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/198da077-645a-4341-9c07-6860b5ce0a0d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.858018 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/198da077-645a-4341-9c07-6860b5ce0a0d-config-data\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.858055 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/198da077-645a-4341-9c07-6860b5ce0a0d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.858070 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/198da077-645a-4341-9c07-6860b5ce0a0d-scripts\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.858134 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m52d4\" (UniqueName: \"kubernetes.io/projected/198da077-645a-4341-9c07-6860b5ce0a0d-kube-api-access-m52d4\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.959406 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/198da077-645a-4341-9c07-6860b5ce0a0d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.959653 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/198da077-645a-4341-9c07-6860b5ce0a0d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.959768 4650 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/198da077-645a-4341-9c07-6860b5ce0a0d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.959785 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/198da077-645a-4341-9c07-6860b5ce0a0d-config-data\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.959882 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/198da077-645a-4341-9c07-6860b5ce0a0d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.959909 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/198da077-645a-4341-9c07-6860b5ce0a0d-scripts\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.960133 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m52d4\" (UniqueName: \"kubernetes.io/projected/198da077-645a-4341-9c07-6860b5ce0a0d-kube-api-access-m52d4\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.964920 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/198da077-645a-4341-9c07-6860b5ce0a0d-config-data\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.966195 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/198da077-645a-4341-9c07-6860b5ce0a0d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.991685 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/198da077-645a-4341-9c07-6860b5ce0a0d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.992011 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m52d4\" (UniqueName: \"kubernetes.io/projected/198da077-645a-4341-9c07-6860b5ce0a0d-kube-api-access-m52d4\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.993258 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5c3b2e9-c11d-4af9-84d7-49384c4e39d3" path="/var/lib/kubelet/pods/c5c3b2e9-c11d-4af9-84d7-49384c4e39d3/volumes" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.993999 4650 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:43 crc kubenswrapper[4650]: I0201 07:42:43.994920 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/198da077-645a-4341-9c07-6860b5ce0a0d-scripts\") pod \"cinder-scheduler-0\" (UID: \"198da077-645a-4341-9c07-6860b5ce0a0d\") " pod="openstack/cinder-scheduler-0" Feb 01 07:42:44 crc kubenswrapper[4650]: I0201 07:42:44.031226 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 01 07:42:44 crc kubenswrapper[4650]: I0201 07:42:44.410289 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7dcc97964d-q5ws4" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.163:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:42:44 crc kubenswrapper[4650]: I0201 07:42:44.446516 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Feb 01 07:42:44 crc kubenswrapper[4650]: I0201 07:42:44.632006 4650 generic.go:334] "Generic (PLEG): container finished" podID="9c4bad14-279f-4212-a86d-cea1c9fe7b48" containerID="e3cebc6d6781572d06c66f70f152f9956e68027b5900448693f2f7b809a6fd77" exitCode=0 Feb 01 07:42:44 crc kubenswrapper[4650]: I0201 07:42:44.632086 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79fd8b5f84-qg9cv" event={"ID":"9c4bad14-279f-4212-a86d-cea1c9fe7b48","Type":"ContainerDied","Data":"e3cebc6d6781572d06c66f70f152f9956e68027b5900448693f2f7b809a6fd77"} Feb 01 07:42:44 crc kubenswrapper[4650]: I0201 07:42:44.632113 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79fd8b5f84-qg9cv" event={"ID":"9c4bad14-279f-4212-a86d-cea1c9fe7b48","Type":"ContainerStarted","Data":"6ec146936e107e8c4f3356bf14738f487ee0e742f4ea72c041f14119113b3575"} Feb 01 07:42:44 crc kubenswrapper[4650]: I0201 07:42:44.645918 4650 generic.go:334] "Generic (PLEG): container finished" podID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerID="038fc80dfb9fd47b73607b6e75c77545e7d8c10ea25cbba2f578bdb2c48b96af" exitCode=0 Feb 01 07:42:44 crc kubenswrapper[4650]: I0201 07:42:44.645954 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b4d45c6bd-qsdbt" event={"ID":"7e572f25-ea86-45a7-b828-214b813f9d0c","Type":"ContainerDied","Data":"038fc80dfb9fd47b73607b6e75c77545e7d8c10ea25cbba2f578bdb2c48b96af"} Feb 01 07:42:44 crc kubenswrapper[4650]: I0201 07:42:44.645975 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b4d45c6bd-qsdbt" event={"ID":"7e572f25-ea86-45a7-b828-214b813f9d0c","Type":"ContainerStarted","Data":"a451bbea895b092fac95434c169fa8820f75c4dcfbd374cb478929932b3b5264"} Feb 01 07:42:44 crc kubenswrapper[4650]: I0201 07:42:44.682955 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 01 07:42:45 crc kubenswrapper[4650]: I0201 07:42:45.059940 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:45 crc kubenswrapper[4650]: I0201 07:42:45.668389 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"198da077-645a-4341-9c07-6860b5ce0a0d","Type":"ContainerStarted","Data":"5452a9c70c9a9c226e37cbd57c1af20cc55de1d0ff2dca9ded190860d9f494b1"} Feb 01 07:42:45 crc kubenswrapper[4650]: I0201 07:42:45.668697 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"198da077-645a-4341-9c07-6860b5ce0a0d","Type":"ContainerStarted","Data":"f93c1d48111abec26e0194b3d84d0dd97392bf2e0444dc00bcfa691b5c9d097c"} Feb 01 07:42:46 crc kubenswrapper[4650]: I0201 07:42:46.598449 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7c95c4fc5d-sj2l8" podUID="1757f711-f748-4782-8075-cc9ae3b3275c" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.169:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 01 07:42:46 crc kubenswrapper[4650]: I0201 07:42:46.677188 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"198da077-645a-4341-9c07-6860b5ce0a0d","Type":"ContainerStarted","Data":"650d8144c964d54d436ed9d61b17bd20b8534d048b359a7191fd2afc09318b90"} Feb 01 07:42:46 crc kubenswrapper[4650]: I0201 07:42:46.702090 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.702067679 podStartE2EDuration="3.702067679s" podCreationTimestamp="2026-02-01 07:42:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:42:46.693982739 +0000 UTC m=+1165.417081004" watchObservedRunningTime="2026-02-01 07:42:46.702067679 +0000 UTC m=+1165.425165934" Feb 01 07:42:47 crc kubenswrapper[4650]: I0201 07:42:47.001920 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-b4f94b5c6-zjcnl" Feb 01 07:42:49 crc kubenswrapper[4650]: I0201 07:42:49.031907 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Feb 01 07:42:49 crc kubenswrapper[4650]: I0201 07:42:49.344387 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7c95c4fc5d-sj2l8" Feb 01 07:42:49 crc kubenswrapper[4650]: I0201 07:42:49.417742 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7dcc97964d-q5ws4"] Feb 01 07:42:49 crc kubenswrapper[4650]: I0201 07:42:49.417956 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7dcc97964d-q5ws4" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerName="barbican-api-log" containerID="cri-o://8a036b7a985593957422949ba28712f5ef297a0981f6fbe8d4cecf0c555da26f" gracePeriod=30 Feb 01 07:42:49 crc kubenswrapper[4650]: I0201 07:42:49.418365 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7dcc97964d-q5ws4" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerName="barbican-api" containerID="cri-o://510fdf5d04f92cafd279fe6b771f18c639d31c04c9fb53ddf8bf52d631fae1ef" gracePeriod=30 Feb 01 07:42:49 crc kubenswrapper[4650]: I0201 07:42:49.703135 4650 generic.go:334] "Generic (PLEG): container finished" podID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerID="8a036b7a985593957422949ba28712f5ef297a0981f6fbe8d4cecf0c555da26f" exitCode=143 Feb 01 07:42:49 crc kubenswrapper[4650]: I0201 07:42:49.703406 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-api-7dcc97964d-q5ws4" event={"ID":"60db3db0-16a4-4f77-bbe2-cd46c8b70039","Type":"ContainerDied","Data":"8a036b7a985593957422949ba28712f5ef297a0981f6fbe8d4cecf0c555da26f"} Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.689787 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.691067 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.694192 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.694369 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-clngd" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.694526 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.700131 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.841942 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/eb39e44a-8146-4d73-bee6-6f5a65ccd5e4-openstack-config-secret\") pod \"openstackclient\" (UID: \"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4\") " pod="openstack/openstackclient" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.841992 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb39e44a-8146-4d73-bee6-6f5a65ccd5e4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4\") " pod="openstack/openstackclient" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.842079 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lp49\" (UniqueName: \"kubernetes.io/projected/eb39e44a-8146-4d73-bee6-6f5a65ccd5e4-kube-api-access-6lp49\") pod \"openstackclient\" (UID: \"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4\") " pod="openstack/openstackclient" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.842135 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/eb39e44a-8146-4d73-bee6-6f5a65ccd5e4-openstack-config\") pod \"openstackclient\" (UID: \"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4\") " pod="openstack/openstackclient" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.943885 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/eb39e44a-8146-4d73-bee6-6f5a65ccd5e4-openstack-config-secret\") pod \"openstackclient\" (UID: \"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4\") " pod="openstack/openstackclient" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.944178 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb39e44a-8146-4d73-bee6-6f5a65ccd5e4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4\") " pod="openstack/openstackclient" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 
07:42:51.944232 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lp49\" (UniqueName: \"kubernetes.io/projected/eb39e44a-8146-4d73-bee6-6f5a65ccd5e4-kube-api-access-6lp49\") pod \"openstackclient\" (UID: \"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4\") " pod="openstack/openstackclient" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.944283 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/eb39e44a-8146-4d73-bee6-6f5a65ccd5e4-openstack-config\") pod \"openstackclient\" (UID: \"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4\") " pod="openstack/openstackclient" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.945160 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/eb39e44a-8146-4d73-bee6-6f5a65ccd5e4-openstack-config\") pod \"openstackclient\" (UID: \"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4\") " pod="openstack/openstackclient" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.950546 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb39e44a-8146-4d73-bee6-6f5a65ccd5e4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4\") " pod="openstack/openstackclient" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.950829 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/eb39e44a-8146-4d73-bee6-6f5a65ccd5e4-openstack-config-secret\") pod \"openstackclient\" (UID: \"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4\") " pod="openstack/openstackclient" Feb 01 07:42:51 crc kubenswrapper[4650]: I0201 07:42:51.968631 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lp49\" (UniqueName: \"kubernetes.io/projected/eb39e44a-8146-4d73-bee6-6f5a65ccd5e4-kube-api-access-6lp49\") pod \"openstackclient\" (UID: \"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4\") " pod="openstack/openstackclient" Feb 01 07:42:52 crc kubenswrapper[4650]: I0201 07:42:52.043127 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Feb 01 07:42:52 crc kubenswrapper[4650]: I0201 07:42:52.635345 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Feb 01 07:42:52 crc kubenswrapper[4650]: I0201 07:42:52.730292 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4","Type":"ContainerStarted","Data":"43a1a07e3d88ad8ebd7fe28472f52a235e7927afa1a9d131a06ae19501598df3"} Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.574304 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.692943 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-config-data\") pod \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.692991 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/60db3db0-16a4-4f77-bbe2-cd46c8b70039-logs\") pod \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.693039 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwgvd\" (UniqueName: \"kubernetes.io/projected/60db3db0-16a4-4f77-bbe2-cd46c8b70039-kube-api-access-wwgvd\") pod \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.693134 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-combined-ca-bundle\") pod \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.693172 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-config-data-custom\") pod \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\" (UID: \"60db3db0-16a4-4f77-bbe2-cd46c8b70039\") " Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.694217 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60db3db0-16a4-4f77-bbe2-cd46c8b70039-logs" (OuterVolumeSpecName: "logs") pod "60db3db0-16a4-4f77-bbe2-cd46c8b70039" (UID: "60db3db0-16a4-4f77-bbe2-cd46c8b70039"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.707244 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60db3db0-16a4-4f77-bbe2-cd46c8b70039-kube-api-access-wwgvd" (OuterVolumeSpecName: "kube-api-access-wwgvd") pod "60db3db0-16a4-4f77-bbe2-cd46c8b70039" (UID: "60db3db0-16a4-4f77-bbe2-cd46c8b70039"). InnerVolumeSpecName "kube-api-access-wwgvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.716149 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "60db3db0-16a4-4f77-bbe2-cd46c8b70039" (UID: "60db3db0-16a4-4f77-bbe2-cd46c8b70039"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.748188 4650 generic.go:334] "Generic (PLEG): container finished" podID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerID="510fdf5d04f92cafd279fe6b771f18c639d31c04c9fb53ddf8bf52d631fae1ef" exitCode=0 Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.748231 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7dcc97964d-q5ws4" event={"ID":"60db3db0-16a4-4f77-bbe2-cd46c8b70039","Type":"ContainerDied","Data":"510fdf5d04f92cafd279fe6b771f18c639d31c04c9fb53ddf8bf52d631fae1ef"} Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.748257 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7dcc97964d-q5ws4" event={"ID":"60db3db0-16a4-4f77-bbe2-cd46c8b70039","Type":"ContainerDied","Data":"1fb375e1584c6381e07c347d90d8099ac3c9246a052c136ec773ed0d7a9300eb"} Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.748281 4650 scope.go:117] "RemoveContainer" containerID="510fdf5d04f92cafd279fe6b771f18c639d31c04c9fb53ddf8bf52d631fae1ef" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.748395 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7dcc97964d-q5ws4" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.754730 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "60db3db0-16a4-4f77-bbe2-cd46c8b70039" (UID: "60db3db0-16a4-4f77-bbe2-cd46c8b70039"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.775230 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-config-data" (OuterVolumeSpecName: "config-data") pod "60db3db0-16a4-4f77-bbe2-cd46c8b70039" (UID: "60db3db0-16a4-4f77-bbe2-cd46c8b70039"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.796540 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.796572 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/60db3db0-16a4-4f77-bbe2-cd46c8b70039-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.796582 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwgvd\" (UniqueName: \"kubernetes.io/projected/60db3db0-16a4-4f77-bbe2-cd46c8b70039-kube-api-access-wwgvd\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.796593 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.796602 4650 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/60db3db0-16a4-4f77-bbe2-cd46c8b70039-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.803627 4650 scope.go:117] "RemoveContainer" containerID="8a036b7a985593957422949ba28712f5ef297a0981f6fbe8d4cecf0c555da26f" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.821255 4650 scope.go:117] "RemoveContainer" containerID="510fdf5d04f92cafd279fe6b771f18c639d31c04c9fb53ddf8bf52d631fae1ef" Feb 01 07:42:53 crc kubenswrapper[4650]: E0201 07:42:53.825180 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"510fdf5d04f92cafd279fe6b771f18c639d31c04c9fb53ddf8bf52d631fae1ef\": container with ID starting with 510fdf5d04f92cafd279fe6b771f18c639d31c04c9fb53ddf8bf52d631fae1ef not found: ID does not exist" containerID="510fdf5d04f92cafd279fe6b771f18c639d31c04c9fb53ddf8bf52d631fae1ef" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.825234 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"510fdf5d04f92cafd279fe6b771f18c639d31c04c9fb53ddf8bf52d631fae1ef"} err="failed to get container status \"510fdf5d04f92cafd279fe6b771f18c639d31c04c9fb53ddf8bf52d631fae1ef\": rpc error: code = NotFound desc = could not find container \"510fdf5d04f92cafd279fe6b771f18c639d31c04c9fb53ddf8bf52d631fae1ef\": container with ID starting with 510fdf5d04f92cafd279fe6b771f18c639d31c04c9fb53ddf8bf52d631fae1ef not found: ID does not exist" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.825257 4650 scope.go:117] "RemoveContainer" containerID="8a036b7a985593957422949ba28712f5ef297a0981f6fbe8d4cecf0c555da26f" Feb 01 07:42:53 crc kubenswrapper[4650]: E0201 07:42:53.827363 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a036b7a985593957422949ba28712f5ef297a0981f6fbe8d4cecf0c555da26f\": container with ID starting with 8a036b7a985593957422949ba28712f5ef297a0981f6fbe8d4cecf0c555da26f not found: ID does not exist" containerID="8a036b7a985593957422949ba28712f5ef297a0981f6fbe8d4cecf0c555da26f" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.827408 4650 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a036b7a985593957422949ba28712f5ef297a0981f6fbe8d4cecf0c555da26f"} err="failed to get container status \"8a036b7a985593957422949ba28712f5ef297a0981f6fbe8d4cecf0c555da26f\": rpc error: code = NotFound desc = could not find container \"8a036b7a985593957422949ba28712f5ef297a0981f6fbe8d4cecf0c555da26f\": container with ID starting with 8a036b7a985593957422949ba28712f5ef297a0981f6fbe8d4cecf0c555da26f not found: ID does not exist" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.965365 4650 scope.go:117] "RemoveContainer" containerID="b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.965433 4650 scope.go:117] "RemoveContainer" containerID="dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31" Feb 01 07:42:53 crc kubenswrapper[4650]: I0201 07:42:53.965522 4650 scope.go:117] "RemoveContainer" containerID="0762ec2515f934e543828087282d638c3ace8afd252c27ec2209aca61ed63e83" Feb 01 07:42:53 crc kubenswrapper[4650]: E0201 07:42:53.965811 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.015785 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.016706 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.017642 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5b4d45c6bd-qsdbt" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.150:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.150:8443: connect: connection refused" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.069070 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7dcc97964d-q5ws4"] Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.079857 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-7dcc97964d-q5ws4"] Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.133986 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.134424 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.135304 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-79fd8b5f84-qg9cv" podUID="9c4bad14-279f-4212-a86d-cea1c9fe7b48" containerName="horizon" probeResult="failure" 
output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.393354 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.480683 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-599d7597b9-mh6hj"] Feb 01 07:42:54 crc kubenswrapper[4650]: E0201 07:42:54.481557 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerName="barbican-api-log" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.481623 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerName="barbican-api-log" Feb 01 07:42:54 crc kubenswrapper[4650]: E0201 07:42:54.481688 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerName="barbican-api" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.481737 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerName="barbican-api" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.481957 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerName="barbican-api-log" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.482019 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" containerName="barbican-api" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.482931 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.485368 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.485546 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.505385 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-599d7597b9-mh6hj"] Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.609073 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39a11122-6fd9-463b-8194-c098d9e764ec-log-httpd\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.609167 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39a11122-6fd9-463b-8194-c098d9e764ec-config-data\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.609208 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39a11122-6fd9-463b-8194-c098d9e764ec-run-httpd\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.609231 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/39a11122-6fd9-463b-8194-c098d9e764ec-etc-swift\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.609278 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a11122-6fd9-463b-8194-c098d9e764ec-public-tls-certs\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.609304 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a11122-6fd9-463b-8194-c098d9e764ec-internal-tls-certs\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.609389 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39a11122-6fd9-463b-8194-c098d9e764ec-combined-ca-bundle\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.609409 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-6c9ws\" (UniqueName: \"kubernetes.io/projected/39a11122-6fd9-463b-8194-c098d9e764ec-kube-api-access-6c9ws\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.636708 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.636956 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="ceilometer-central-agent" containerID="cri-o://c2f795c6639bb341610be8515b54993b33c481acbb80cec36b066ad8389f452f" gracePeriod=30 Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.637095 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="proxy-httpd" containerID="cri-o://29d196d29da2073b28128c7fde254db724160d7237c6e660290314f8d4b3f3f7" gracePeriod=30 Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.637141 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="sg-core" containerID="cri-o://8b99acef35160501425f3b881e05342a9a1133c6f987c4265efefab0e6d42f59" gracePeriod=30 Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.637175 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="ceilometer-notification-agent" containerID="cri-o://3ba13e512980dd5e72e8be2a70e0017c9a01c0d583ca8ce52daaeceb106e2fde" gracePeriod=30 Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.713933 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39a11122-6fd9-463b-8194-c098d9e764ec-log-httpd\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.713999 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39a11122-6fd9-463b-8194-c098d9e764ec-config-data\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.714041 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39a11122-6fd9-463b-8194-c098d9e764ec-run-httpd\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.714067 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/39a11122-6fd9-463b-8194-c098d9e764ec-etc-swift\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.714127 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/39a11122-6fd9-463b-8194-c098d9e764ec-public-tls-certs\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.714158 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a11122-6fd9-463b-8194-c098d9e764ec-internal-tls-certs\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.714215 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39a11122-6fd9-463b-8194-c098d9e764ec-combined-ca-bundle\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.714235 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6c9ws\" (UniqueName: \"kubernetes.io/projected/39a11122-6fd9-463b-8194-c098d9e764ec-kube-api-access-6c9ws\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.714436 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39a11122-6fd9-463b-8194-c098d9e764ec-log-httpd\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.720250 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a11122-6fd9-463b-8194-c098d9e764ec-public-tls-certs\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.722434 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39a11122-6fd9-463b-8194-c098d9e764ec-run-httpd\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.723679 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/39a11122-6fd9-463b-8194-c098d9e764ec-etc-swift\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.723940 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a11122-6fd9-463b-8194-c098d9e764ec-internal-tls-certs\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.736852 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39a11122-6fd9-463b-8194-c098d9e764ec-combined-ca-bundle\") pod 
\"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.741639 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6c9ws\" (UniqueName: \"kubernetes.io/projected/39a11122-6fd9-463b-8194-c098d9e764ec-kube-api-access-6c9ws\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.756931 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.168:3000/\": read tcp 10.217.0.2:54634->10.217.0.168:3000: read: connection reset by peer" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.772090 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39a11122-6fd9-463b-8194-c098d9e764ec-config-data\") pod \"swift-proxy-599d7597b9-mh6hj\" (UID: \"39a11122-6fd9-463b-8194-c098d9e764ec\") " pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:54 crc kubenswrapper[4650]: I0201 07:42:54.799424 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:55 crc kubenswrapper[4650]: I0201 07:42:55.494549 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-599d7597b9-mh6hj"] Feb 01 07:42:55 crc kubenswrapper[4650]: I0201 07:42:55.834168 4650 generic.go:334] "Generic (PLEG): container finished" podID="680e3982-832f-42e6-a5ba-6375217b266b" containerID="29d196d29da2073b28128c7fde254db724160d7237c6e660290314f8d4b3f3f7" exitCode=0 Feb 01 07:42:55 crc kubenswrapper[4650]: I0201 07:42:55.834434 4650 generic.go:334] "Generic (PLEG): container finished" podID="680e3982-832f-42e6-a5ba-6375217b266b" containerID="8b99acef35160501425f3b881e05342a9a1133c6f987c4265efefab0e6d42f59" exitCode=2 Feb 01 07:42:55 crc kubenswrapper[4650]: I0201 07:42:55.834443 4650 generic.go:334] "Generic (PLEG): container finished" podID="680e3982-832f-42e6-a5ba-6375217b266b" containerID="3ba13e512980dd5e72e8be2a70e0017c9a01c0d583ca8ce52daaeceb106e2fde" exitCode=0 Feb 01 07:42:55 crc kubenswrapper[4650]: I0201 07:42:55.834450 4650 generic.go:334] "Generic (PLEG): container finished" podID="680e3982-832f-42e6-a5ba-6375217b266b" containerID="c2f795c6639bb341610be8515b54993b33c481acbb80cec36b066ad8389f452f" exitCode=0 Feb 01 07:42:55 crc kubenswrapper[4650]: I0201 07:42:55.834497 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"680e3982-832f-42e6-a5ba-6375217b266b","Type":"ContainerDied","Data":"29d196d29da2073b28128c7fde254db724160d7237c6e660290314f8d4b3f3f7"} Feb 01 07:42:55 crc kubenswrapper[4650]: I0201 07:42:55.834521 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"680e3982-832f-42e6-a5ba-6375217b266b","Type":"ContainerDied","Data":"8b99acef35160501425f3b881e05342a9a1133c6f987c4265efefab0e6d42f59"} Feb 01 07:42:55 crc kubenswrapper[4650]: I0201 07:42:55.834531 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"680e3982-832f-42e6-a5ba-6375217b266b","Type":"ContainerDied","Data":"3ba13e512980dd5e72e8be2a70e0017c9a01c0d583ca8ce52daaeceb106e2fde"} Feb 01 07:42:55 crc 
kubenswrapper[4650]: I0201 07:42:55.834539 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"680e3982-832f-42e6-a5ba-6375217b266b","Type":"ContainerDied","Data":"c2f795c6639bb341610be8515b54993b33c481acbb80cec36b066ad8389f452f"} Feb 01 07:42:55 crc kubenswrapper[4650]: I0201 07:42:55.852130 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"4aeae73f2ffbe62127ec55e972cf8ef1c777d97f5ed74c093c274ce0aa069bc5"} Feb 01 07:42:55 crc kubenswrapper[4650]: I0201 07:42:55.988163 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60db3db0-16a4-4f77-bbe2-cd46c8b70039" path="/var/lib/kubelet/pods/60db3db0-16a4-4f77-bbe2-cd46c8b70039/volumes" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.365227 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.404813 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-d5c446bd9-7rlx2" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.478890 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-78c5fb6df7-xcnvd"] Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.479144 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-78c5fb6df7-xcnvd" podUID="7bcbe498-d2bb-4ad5-87dd-f2896380acfe" containerName="neutron-api" containerID="cri-o://cca07a3ed44421c1c2ddae2db88b3fa3f088622070c2075bd1008da4c9b836d4" gracePeriod=30 Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.479863 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-78c5fb6df7-xcnvd" podUID="7bcbe498-d2bb-4ad5-87dd-f2896380acfe" containerName="neutron-httpd" containerID="cri-o://2d7be25a36bfba8dcfd682fcb26b455bbe9583c5afec760041cc8bcb3f5e8526" gracePeriod=30 Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.504294 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzmml\" (UniqueName: \"kubernetes.io/projected/680e3982-832f-42e6-a5ba-6375217b266b-kube-api-access-nzmml\") pod \"680e3982-832f-42e6-a5ba-6375217b266b\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.504375 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-sg-core-conf-yaml\") pod \"680e3982-832f-42e6-a5ba-6375217b266b\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.504397 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-combined-ca-bundle\") pod \"680e3982-832f-42e6-a5ba-6375217b266b\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.504437 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-config-data\") pod \"680e3982-832f-42e6-a5ba-6375217b266b\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 
07:42:56.504466 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/680e3982-832f-42e6-a5ba-6375217b266b-log-httpd\") pod \"680e3982-832f-42e6-a5ba-6375217b266b\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.504544 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-scripts\") pod \"680e3982-832f-42e6-a5ba-6375217b266b\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.504733 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/680e3982-832f-42e6-a5ba-6375217b266b-run-httpd\") pod \"680e3982-832f-42e6-a5ba-6375217b266b\" (UID: \"680e3982-832f-42e6-a5ba-6375217b266b\") " Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.506350 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/680e3982-832f-42e6-a5ba-6375217b266b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "680e3982-832f-42e6-a5ba-6375217b266b" (UID: "680e3982-832f-42e6-a5ba-6375217b266b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.512402 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/680e3982-832f-42e6-a5ba-6375217b266b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "680e3982-832f-42e6-a5ba-6375217b266b" (UID: "680e3982-832f-42e6-a5ba-6375217b266b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.531169 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/680e3982-832f-42e6-a5ba-6375217b266b-kube-api-access-nzmml" (OuterVolumeSpecName: "kube-api-access-nzmml") pod "680e3982-832f-42e6-a5ba-6375217b266b" (UID: "680e3982-832f-42e6-a5ba-6375217b266b"). InnerVolumeSpecName "kube-api-access-nzmml". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.568864 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-scripts" (OuterVolumeSpecName: "scripts") pod "680e3982-832f-42e6-a5ba-6375217b266b" (UID: "680e3982-832f-42e6-a5ba-6375217b266b"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.643461 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.643500 4650 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/680e3982-832f-42e6-a5ba-6375217b266b-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.643511 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzmml\" (UniqueName: \"kubernetes.io/projected/680e3982-832f-42e6-a5ba-6375217b266b-kube-api-access-nzmml\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.643522 4650 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/680e3982-832f-42e6-a5ba-6375217b266b-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.709782 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "680e3982-832f-42e6-a5ba-6375217b266b" (UID: "680e3982-832f-42e6-a5ba-6375217b266b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.717857 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "680e3982-832f-42e6-a5ba-6375217b266b" (UID: "680e3982-832f-42e6-a5ba-6375217b266b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.755219 4650 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.755524 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.825191 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-config-data" (OuterVolumeSpecName: "config-data") pod "680e3982-832f-42e6-a5ba-6375217b266b" (UID: "680e3982-832f-42e6-a5ba-6375217b266b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.859051 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/680e3982-832f-42e6-a5ba-6375217b266b-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.885162 4650 generic.go:334] "Generic (PLEG): container finished" podID="7bcbe498-d2bb-4ad5-87dd-f2896380acfe" containerID="2d7be25a36bfba8dcfd682fcb26b455bbe9583c5afec760041cc8bcb3f5e8526" exitCode=0 Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.885229 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78c5fb6df7-xcnvd" event={"ID":"7bcbe498-d2bb-4ad5-87dd-f2896380acfe","Type":"ContainerDied","Data":"2d7be25a36bfba8dcfd682fcb26b455bbe9583c5afec760041cc8bcb3f5e8526"} Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.899135 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"680e3982-832f-42e6-a5ba-6375217b266b","Type":"ContainerDied","Data":"847085f4244c4ef8a38d21469cfc07b302736e4f3ef5f4604f6f2c6386ad5c44"} Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.899180 4650 scope.go:117] "RemoveContainer" containerID="29d196d29da2073b28128c7fde254db724160d7237c6e660290314f8d4b3f3f7" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.899295 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.923633 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"e84ba0eee2916b62f258425a14fc32dc70d2186a161e4ce2c3cf22c8a0b78b3b"} Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.923669 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"d98a001c0aed64d0aa6f36e0d05edfcf4acd588e3aabc82b8a4ef3faf3a106a7"} Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.924055 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.924091 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.941501 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.948332 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.965588 4650 scope.go:117] "RemoveContainer" containerID="8b99acef35160501425f3b881e05342a9a1133c6f987c4265efefab0e6d42f59" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.983181 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:42:56 crc kubenswrapper[4650]: E0201 07:42:56.983710 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="sg-core" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.983789 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="sg-core" Feb 01 07:42:56 crc kubenswrapper[4650]: 
E0201 07:42:56.983866 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="ceilometer-central-agent" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.983924 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="ceilometer-central-agent" Feb 01 07:42:56 crc kubenswrapper[4650]: E0201 07:42:56.983984 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="ceilometer-notification-agent" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.984079 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="ceilometer-notification-agent" Feb 01 07:42:56 crc kubenswrapper[4650]: E0201 07:42:56.984164 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="proxy-httpd" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.984214 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="proxy-httpd" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.984429 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="ceilometer-central-agent" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.984519 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="ceilometer-notification-agent" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.984578 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="proxy-httpd" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.984632 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="680e3982-832f-42e6-a5ba-6375217b266b" containerName="sg-core" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.986213 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.995697 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 01 07:42:56 crc kubenswrapper[4650]: I0201 07:42:56.995931 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.018127 4650 scope.go:117] "RemoveContainer" containerID="3ba13e512980dd5e72e8be2a70e0017c9a01c0d583ca8ce52daaeceb106e2fde" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.022842 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-599d7597b9-mh6hj" podStartSLOduration=3.022823664 podStartE2EDuration="3.022823664s" podCreationTimestamp="2026-02-01 07:42:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:42:56.995091414 +0000 UTC m=+1175.718189669" watchObservedRunningTime="2026-02-01 07:42:57.022823664 +0000 UTC m=+1175.745921909" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.023237 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.070549 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/50d935fc-fa90-4b83-a7d8-d9175d3e9160-log-httpd\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.070795 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-scripts\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.070812 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-config-data\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.070938 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmrct\" (UniqueName: \"kubernetes.io/projected/50d935fc-fa90-4b83-a7d8-d9175d3e9160-kube-api-access-tmrct\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.070965 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/50d935fc-fa90-4b83-a7d8-d9175d3e9160-run-httpd\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.071105 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: 
I0201 07:42:57.071175 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.097717 4650 scope.go:117] "RemoveContainer" containerID="c2f795c6639bb341610be8515b54993b33c481acbb80cec36b066ad8389f452f" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.173216 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/50d935fc-fa90-4b83-a7d8-d9175d3e9160-log-httpd\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.173286 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-scripts\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.173303 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-config-data\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.173338 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmrct\" (UniqueName: \"kubernetes.io/projected/50d935fc-fa90-4b83-a7d8-d9175d3e9160-kube-api-access-tmrct\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.173369 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/50d935fc-fa90-4b83-a7d8-d9175d3e9160-run-httpd\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.173416 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.173459 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.174123 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/50d935fc-fa90-4b83-a7d8-d9175d3e9160-log-httpd\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.174260 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/50d935fc-fa90-4b83-a7d8-d9175d3e9160-run-httpd\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.179193 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.183851 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-config-data\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.185208 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.188589 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-scripts\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.193266 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmrct\" (UniqueName: \"kubernetes.io/projected/50d935fc-fa90-4b83-a7d8-d9175d3e9160-kube-api-access-tmrct\") pod \"ceilometer-0\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.316901 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.833363 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:42:57 crc kubenswrapper[4650]: W0201 07:42:57.851203 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod50d935fc_fa90_4b83_a7d8_d9175d3e9160.slice/crio-2e7980ba90fcf3e4f4ecf37bc42352fd94ce94c7337018c3044fe3e0acf35c1f WatchSource:0}: Error finding container 2e7980ba90fcf3e4f4ecf37bc42352fd94ce94c7337018c3044fe3e0acf35c1f: Status 404 returned error can't find the container with id 2e7980ba90fcf3e4f4ecf37bc42352fd94ce94c7337018c3044fe3e0acf35c1f Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.937526 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"50d935fc-fa90-4b83-a7d8-d9175d3e9160","Type":"ContainerStarted","Data":"2e7980ba90fcf3e4f4ecf37bc42352fd94ce94c7337018c3044fe3e0acf35c1f"} Feb 01 07:42:57 crc kubenswrapper[4650]: I0201 07:42:57.976747 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="680e3982-832f-42e6-a5ba-6375217b266b" path="/var/lib/kubelet/pods/680e3982-832f-42e6-a5ba-6375217b266b/volumes" Feb 01 07:42:58 crc kubenswrapper[4650]: I0201 07:42:58.984424 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="e84ba0eee2916b62f258425a14fc32dc70d2186a161e4ce2c3cf22c8a0b78b3b" exitCode=1 Feb 01 07:42:58 crc kubenswrapper[4650]: I0201 07:42:58.984578 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"e84ba0eee2916b62f258425a14fc32dc70d2186a161e4ce2c3cf22c8a0b78b3b"} Feb 01 07:42:58 crc kubenswrapper[4650]: I0201 07:42:58.985509 4650 scope.go:117] "RemoveContainer" containerID="e84ba0eee2916b62f258425a14fc32dc70d2186a161e4ce2c3cf22c8a0b78b3b" Feb 01 07:42:58 crc kubenswrapper[4650]: I0201 07:42:58.990316 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"50d935fc-fa90-4b83-a7d8-d9175d3e9160","Type":"ContainerStarted","Data":"59e2373a5e220111d67e66e42154c25be1646a5242cf503be76abf56f53d12f2"} Feb 01 07:42:59 crc kubenswrapper[4650]: I0201 07:42:59.999657 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"50d935fc-fa90-4b83-a7d8-d9175d3e9160","Type":"ContainerStarted","Data":"ec5168ca941e1f875f4921e3bddbdd2c01bd41a9a4eaa9ed478687fa1d2640fb"} Feb 01 07:43:00 crc kubenswrapper[4650]: I0201 07:43:00.003574 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"78ea0cb41121619435cbf7540e5d3f25514cf2de97603c865ab3c56d1fdc2457"} Feb 01 07:43:00 crc kubenswrapper[4650]: I0201 07:43:00.004218 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.020444 4650 generic.go:334] "Generic (PLEG): container finished" podID="7467859e-a792-4959-bd51-d353099352bd" containerID="37cd4406ada2c9a6fa17ddf90e372c2026b5e351b6825103b029e49fa4c52eaf" exitCode=137 Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.022284 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"7467859e-a792-4959-bd51-d353099352bd","Type":"ContainerDied","Data":"37cd4406ada2c9a6fa17ddf90e372c2026b5e351b6825103b029e49fa4c52eaf"} Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.022397 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7467859e-a792-4959-bd51-d353099352bd","Type":"ContainerDied","Data":"fac7156c95f6ebc86803bd6c5d91b653686f10c64f354fa4732551fb36c9755e"} Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.022476 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fac7156c95f6ebc86803bd6c5d91b653686f10c64f354fa4732551fb36c9755e" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.030311 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"50d935fc-fa90-4b83-a7d8-d9175d3e9160","Type":"ContainerStarted","Data":"2e73fae6227047d6ed8bde8b98fade08ac739d44e7337db0957e7722efc2ce51"} Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.046903 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.055390 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.175410 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-scripts\") pod \"7467859e-a792-4959-bd51-d353099352bd\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.175500 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7467859e-a792-4959-bd51-d353099352bd-etc-machine-id\") pod \"7467859e-a792-4959-bd51-d353099352bd\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.175652 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7467859e-a792-4959-bd51-d353099352bd-logs\") pod \"7467859e-a792-4959-bd51-d353099352bd\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.175673 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-config-data\") pod \"7467859e-a792-4959-bd51-d353099352bd\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.175692 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-config-data-custom\") pod \"7467859e-a792-4959-bd51-d353099352bd\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.175714 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgm2c\" (UniqueName: \"kubernetes.io/projected/7467859e-a792-4959-bd51-d353099352bd-kube-api-access-cgm2c\") pod \"7467859e-a792-4959-bd51-d353099352bd\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " Feb 01 
07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.175749 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-combined-ca-bundle\") pod \"7467859e-a792-4959-bd51-d353099352bd\" (UID: \"7467859e-a792-4959-bd51-d353099352bd\") " Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.176487 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7467859e-a792-4959-bd51-d353099352bd-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "7467859e-a792-4959-bd51-d353099352bd" (UID: "7467859e-a792-4959-bd51-d353099352bd"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.176675 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7467859e-a792-4959-bd51-d353099352bd-logs" (OuterVolumeSpecName: "logs") pod "7467859e-a792-4959-bd51-d353099352bd" (UID: "7467859e-a792-4959-bd51-d353099352bd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.183684 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7467859e-a792-4959-bd51-d353099352bd-kube-api-access-cgm2c" (OuterVolumeSpecName: "kube-api-access-cgm2c") pod "7467859e-a792-4959-bd51-d353099352bd" (UID: "7467859e-a792-4959-bd51-d353099352bd"). InnerVolumeSpecName "kube-api-access-cgm2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.205942 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-scripts" (OuterVolumeSpecName: "scripts") pod "7467859e-a792-4959-bd51-d353099352bd" (UID: "7467859e-a792-4959-bd51-d353099352bd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.206079 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "7467859e-a792-4959-bd51-d353099352bd" (UID: "7467859e-a792-4959-bd51-d353099352bd"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.277311 4650 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7467859e-a792-4959-bd51-d353099352bd-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.277340 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7467859e-a792-4959-bd51-d353099352bd-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.277351 4650 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.277359 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgm2c\" (UniqueName: \"kubernetes.io/projected/7467859e-a792-4959-bd51-d353099352bd-kube-api-access-cgm2c\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.277371 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.281194 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7467859e-a792-4959-bd51-d353099352bd" (UID: "7467859e-a792-4959-bd51-d353099352bd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.284460 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-config-data" (OuterVolumeSpecName: "config-data") pod "7467859e-a792-4959-bd51-d353099352bd" (UID: "7467859e-a792-4959-bd51-d353099352bd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.379699 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:01 crc kubenswrapper[4650]: I0201 07:43:01.379730 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7467859e-a792-4959-bd51-d353099352bd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.113868 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="78ea0cb41121619435cbf7540e5d3f25514cf2de97603c865ab3c56d1fdc2457" exitCode=1 Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.113944 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"78ea0cb41121619435cbf7540e5d3f25514cf2de97603c865ab3c56d1fdc2457"} Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.114209 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.114208 4650 scope.go:117] "RemoveContainer" containerID="e84ba0eee2916b62f258425a14fc32dc70d2186a161e4ce2c3cf22c8a0b78b3b" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.115242 4650 scope.go:117] "RemoveContainer" containerID="78ea0cb41121619435cbf7540e5d3f25514cf2de97603c865ab3c56d1fdc2457" Feb 01 07:43:02 crc kubenswrapper[4650]: E0201 07:43:02.115485 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 10s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.122473 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.239072 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.253591 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.269142 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Feb 01 07:43:02 crc kubenswrapper[4650]: E0201 07:43:02.269574 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7467859e-a792-4959-bd51-d353099352bd" containerName="cinder-api-log" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.269590 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="7467859e-a792-4959-bd51-d353099352bd" containerName="cinder-api-log" Feb 01 07:43:02 crc kubenswrapper[4650]: E0201 07:43:02.269610 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7467859e-a792-4959-bd51-d353099352bd" containerName="cinder-api" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.269617 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="7467859e-a792-4959-bd51-d353099352bd" containerName="cinder-api" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.269784 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="7467859e-a792-4959-bd51-d353099352bd" containerName="cinder-api" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.269816 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="7467859e-a792-4959-bd51-d353099352bd" containerName="cinder-api-log" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.270781 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.276808 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.276985 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.277112 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.278547 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.407563 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-config-data-custom\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.407664 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-scripts\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.407685 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.407716 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj5x5\" (UniqueName: \"kubernetes.io/projected/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-kube-api-access-sj5x5\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.407833 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-config-data\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.407862 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-logs\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.408014 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-public-tls-certs\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.408114 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-etc-machine-id\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.408249 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.510455 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-public-tls-certs\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.510493 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-etc-machine-id\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.510534 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.510559 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-config-data-custom\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.510608 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-etc-machine-id\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.510626 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-scripts\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.510773 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.510862 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sj5x5\" (UniqueName: \"kubernetes.io/projected/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-kube-api-access-sj5x5\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.511058 4650 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-config-data\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.511091 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-logs\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.511741 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-logs\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.515741 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-scripts\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.516012 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-config-data-custom\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.517081 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-public-tls-certs\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.517967 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-config-data\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.519831 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.520683 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.525784 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sj5x5\" (UniqueName: \"kubernetes.io/projected/dd2aba78-ffb9-4c24-bd46-0ecc5c93217e-kube-api-access-sj5x5\") pod \"cinder-api-0\" (UID: \"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e\") " pod="openstack/cinder-api-0" Feb 01 07:43:02 crc kubenswrapper[4650]: I0201 07:43:02.607913 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 01 07:43:03 crc kubenswrapper[4650]: I0201 07:43:03.134088 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"50d935fc-fa90-4b83-a7d8-d9175d3e9160","Type":"ContainerStarted","Data":"7c758a1ac49a68fd9113195937c7ef256dfee8485bf9413a47c4de41e8990d59"} Feb 01 07:43:03 crc kubenswrapper[4650]: I0201 07:43:03.134610 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 01 07:43:03 crc kubenswrapper[4650]: I0201 07:43:03.159122 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.01056867 podStartE2EDuration="7.159090412s" podCreationTimestamp="2026-02-01 07:42:56 +0000 UTC" firstStartedPulling="2026-02-01 07:42:57.85324125 +0000 UTC m=+1176.576339495" lastFinishedPulling="2026-02-01 07:43:02.001762992 +0000 UTC m=+1180.724861237" observedRunningTime="2026-02-01 07:43:03.154647995 +0000 UTC m=+1181.877746240" watchObservedRunningTime="2026-02-01 07:43:03.159090412 +0000 UTC m=+1181.882188657" Feb 01 07:43:03 crc kubenswrapper[4650]: I0201 07:43:03.290982 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 01 07:43:03 crc kubenswrapper[4650]: W0201 07:43:03.310918 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddd2aba78_ffb9_4c24_bd46_0ecc5c93217e.slice/crio-e47112c3634b2e584614551bdb189abce17c2dbcd7cf741e54260c524ca6b30d WatchSource:0}: Error finding container e47112c3634b2e584614551bdb189abce17c2dbcd7cf741e54260c524ca6b30d: Status 404 returned error can't find the container with id e47112c3634b2e584614551bdb189abce17c2dbcd7cf741e54260c524ca6b30d Feb 01 07:43:03 crc kubenswrapper[4650]: I0201 07:43:03.806197 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:43:03 crc kubenswrapper[4650]: I0201 07:43:03.806447 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:03 crc kubenswrapper[4650]: I0201 07:43:03.810806 4650 scope.go:117] "RemoveContainer" containerID="78ea0cb41121619435cbf7540e5d3f25514cf2de97603c865ab3c56d1fdc2457" Feb 01 07:43:03 crc kubenswrapper[4650]: E0201 07:43:03.811974 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 10s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:43:03 crc kubenswrapper[4650]: I0201 07:43:03.819028 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:03 crc kubenswrapper[4650]: I0201 07:43:03.983908 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7467859e-a792-4959-bd51-d353099352bd" path="/var/lib/kubelet/pods/7467859e-a792-4959-bd51-d353099352bd/volumes" Feb 01 07:43:04 crc kubenswrapper[4650]: I0201 07:43:04.016255 4650 prober.go:107] 
"Probe failed" probeType="Startup" pod="openstack/horizon-5b4d45c6bd-qsdbt" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.150:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.150:8443: connect: connection refused" Feb 01 07:43:04 crc kubenswrapper[4650]: I0201 07:43:04.134774 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-79fd8b5f84-qg9cv" podUID="9c4bad14-279f-4212-a86d-cea1c9fe7b48" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Feb 01 07:43:04 crc kubenswrapper[4650]: I0201 07:43:04.167459 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e","Type":"ContainerStarted","Data":"564075d0814f4a22794812a7991b404ce8485fdf56b93cde76f53015a6d1a01f"} Feb 01 07:43:04 crc kubenswrapper[4650]: I0201 07:43:04.167502 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e","Type":"ContainerStarted","Data":"e47112c3634b2e584614551bdb189abce17c2dbcd7cf741e54260c524ca6b30d"} Feb 01 07:43:04 crc kubenswrapper[4650]: I0201 07:43:04.174064 4650 generic.go:334] "Generic (PLEG): container finished" podID="7bcbe498-d2bb-4ad5-87dd-f2896380acfe" containerID="cca07a3ed44421c1c2ddae2db88b3fa3f088622070c2075bd1008da4c9b836d4" exitCode=0 Feb 01 07:43:04 crc kubenswrapper[4650]: I0201 07:43:04.174176 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78c5fb6df7-xcnvd" event={"ID":"7bcbe498-d2bb-4ad5-87dd-f2896380acfe","Type":"ContainerDied","Data":"cca07a3ed44421c1c2ddae2db88b3fa3f088622070c2075bd1008da4c9b836d4"} Feb 01 07:43:04 crc kubenswrapper[4650]: I0201 07:43:04.813736 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:06 crc kubenswrapper[4650]: I0201 07:43:06.614919 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:06 crc kubenswrapper[4650]: I0201 07:43:06.615471 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="ceilometer-central-agent" containerID="cri-o://59e2373a5e220111d67e66e42154c25be1646a5242cf503be76abf56f53d12f2" gracePeriod=30 Feb 01 07:43:06 crc kubenswrapper[4650]: I0201 07:43:06.615601 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="sg-core" containerID="cri-o://2e73fae6227047d6ed8bde8b98fade08ac739d44e7337db0957e7722efc2ce51" gracePeriod=30 Feb 01 07:43:06 crc kubenswrapper[4650]: I0201 07:43:06.615600 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="proxy-httpd" containerID="cri-o://7c758a1ac49a68fd9113195937c7ef256dfee8485bf9413a47c4de41e8990d59" gracePeriod=30 Feb 01 07:43:06 crc kubenswrapper[4650]: I0201 07:43:06.615655 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="ceilometer-notification-agent" containerID="cri-o://ec5168ca941e1f875f4921e3bddbdd2c01bd41a9a4eaa9ed478687fa1d2640fb" gracePeriod=30 Feb 01 07:43:06 crc kubenswrapper[4650]: I0201 07:43:06.807873 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:07 crc kubenswrapper[4650]: I0201 07:43:07.208979 4650 generic.go:334] "Generic (PLEG): container finished" podID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerID="7c758a1ac49a68fd9113195937c7ef256dfee8485bf9413a47c4de41e8990d59" exitCode=0 Feb 01 07:43:07 crc kubenswrapper[4650]: I0201 07:43:07.209009 4650 generic.go:334] "Generic (PLEG): container finished" podID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerID="2e73fae6227047d6ed8bde8b98fade08ac739d44e7337db0957e7722efc2ce51" exitCode=2 Feb 01 07:43:07 crc kubenswrapper[4650]: I0201 07:43:07.209017 4650 generic.go:334] "Generic (PLEG): container finished" podID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerID="ec5168ca941e1f875f4921e3bddbdd2c01bd41a9a4eaa9ed478687fa1d2640fb" exitCode=0 Feb 01 07:43:07 crc kubenswrapper[4650]: I0201 07:43:07.209052 4650 generic.go:334] "Generic (PLEG): container finished" podID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerID="59e2373a5e220111d67e66e42154c25be1646a5242cf503be76abf56f53d12f2" exitCode=0 Feb 01 07:43:07 crc kubenswrapper[4650]: I0201 07:43:07.209071 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"50d935fc-fa90-4b83-a7d8-d9175d3e9160","Type":"ContainerDied","Data":"7c758a1ac49a68fd9113195937c7ef256dfee8485bf9413a47c4de41e8990d59"} Feb 01 07:43:07 crc kubenswrapper[4650]: I0201 07:43:07.209092 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"50d935fc-fa90-4b83-a7d8-d9175d3e9160","Type":"ContainerDied","Data":"2e73fae6227047d6ed8bde8b98fade08ac739d44e7337db0957e7722efc2ce51"} Feb 01 07:43:07 crc kubenswrapper[4650]: I0201 07:43:07.209102 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"50d935fc-fa90-4b83-a7d8-d9175d3e9160","Type":"ContainerDied","Data":"ec5168ca941e1f875f4921e3bddbdd2c01bd41a9a4eaa9ed478687fa1d2640fb"} Feb 01 07:43:07 crc kubenswrapper[4650]: I0201 07:43:07.209111 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"50d935fc-fa90-4b83-a7d8-d9175d3e9160","Type":"ContainerDied","Data":"59e2373a5e220111d67e66e42154c25be1646a5242cf503be76abf56f53d12f2"} Feb 01 07:43:08 crc kubenswrapper[4650]: I0201 07:43:08.965885 4650 scope.go:117] "RemoveContainer" containerID="b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492" Feb 01 07:43:08 crc kubenswrapper[4650]: I0201 07:43:08.966193 4650 scope.go:117] "RemoveContainer" containerID="dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31" Feb 01 07:43:08 crc kubenswrapper[4650]: I0201 07:43:08.966278 4650 scope.go:117] "RemoveContainer" containerID="0762ec2515f934e543828087282d638c3ace8afd252c27ec2209aca61ed63e83" Feb 01 07:43:08 crc kubenswrapper[4650]: E0201 07:43:08.966600 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:43:09 crc kubenswrapper[4650]: I0201 07:43:09.813257 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:09 crc kubenswrapper[4650]: I0201 07:43:09.813605 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:43:09 crc kubenswrapper[4650]: I0201 07:43:09.814349 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"d98a001c0aed64d0aa6f36e0d05edfcf4acd588e3aabc82b8a4ef3faf3a106a7"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 07:43:09 crc kubenswrapper[4650]: I0201 07:43:09.814373 4650 scope.go:117] "RemoveContainer" containerID="78ea0cb41121619435cbf7540e5d3f25514cf2de97603c865ab3c56d1fdc2457" Feb 01 07:43:09 crc kubenswrapper[4650]: I0201 07:43:09.814397 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://d98a001c0aed64d0aa6f36e0d05edfcf4acd588e3aabc82b8a4ef3faf3a106a7" gracePeriod=30 Feb 01 07:43:09 crc kubenswrapper[4650]: I0201 07:43:09.814826 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:09 crc kubenswrapper[4650]: I0201 07:43:09.835452 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.173:8080/healthcheck\": EOF" Feb 01 07:43:10 crc kubenswrapper[4650]: I0201 07:43:10.242456 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="d98a001c0aed64d0aa6f36e0d05edfcf4acd588e3aabc82b8a4ef3faf3a106a7" exitCode=0 Feb 01 07:43:10 crc kubenswrapper[4650]: I0201 07:43:10.242515 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"d98a001c0aed64d0aa6f36e0d05edfcf4acd588e3aabc82b8a4ef3faf3a106a7"} Feb 01 07:43:10 crc kubenswrapper[4650]: I0201 07:43:10.684042 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-8d56c6c66-9jtxc" Feb 01 07:43:10 crc kubenswrapper[4650]: I0201 07:43:10.685201 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-8d56c6c66-9jtxc" Feb 
01 07:43:10 crc kubenswrapper[4650]: I0201 07:43:10.760120 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-87f9c5788-4s9lh"] Feb 01 07:43:10 crc kubenswrapper[4650]: I0201 07:43:10.760347 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-87f9c5788-4s9lh" podUID="ae091b31-cc44-44f4-a374-6373c9501292" containerName="placement-log" containerID="cri-o://1b32ba94858cc0c3a1436431907746a3cd185a00b7f2d436e309e29710dc850e" gracePeriod=30 Feb 01 07:43:10 crc kubenswrapper[4650]: I0201 07:43:10.762496 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-87f9c5788-4s9lh" podUID="ae091b31-cc44-44f4-a374-6373c9501292" containerName="placement-api" containerID="cri-o://16e1f0a1172f0b931837eee6c283685a5230303871b51476c961200d508c6bd7" gracePeriod=30 Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.256368 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78c5fb6df7-xcnvd" event={"ID":"7bcbe498-d2bb-4ad5-87dd-f2896380acfe","Type":"ContainerDied","Data":"cb2f8af6d7f431c3f8fcdccb2f2945cbb5fed4481dbae22bd46f8b30ef76af31"} Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.256605 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb2f8af6d7f431c3f8fcdccb2f2945cbb5fed4481dbae22bd46f8b30ef76af31" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.258253 4650 generic.go:334] "Generic (PLEG): container finished" podID="ae091b31-cc44-44f4-a374-6373c9501292" containerID="1b32ba94858cc0c3a1436431907746a3cd185a00b7f2d436e309e29710dc850e" exitCode=143 Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.260102 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-87f9c5788-4s9lh" event={"ID":"ae091b31-cc44-44f4-a374-6373c9501292","Type":"ContainerDied","Data":"1b32ba94858cc0c3a1436431907746a3cd185a00b7f2d436e309e29710dc850e"} Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.366808 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.486547 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-ovndb-tls-certs\") pod \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.486860 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-config\") pod \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.486923 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-public-tls-certs\") pod \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.486984 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-combined-ca-bundle\") pod \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.487011 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hcr6t\" (UniqueName: \"kubernetes.io/projected/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-kube-api-access-hcr6t\") pod \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.487067 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-internal-tls-certs\") pod \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.487148 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-httpd-config\") pod \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\" (UID: \"7bcbe498-d2bb-4ad5-87dd-f2896380acfe\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.551169 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "7bcbe498-d2bb-4ad5-87dd-f2896380acfe" (UID: "7bcbe498-d2bb-4ad5-87dd-f2896380acfe"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.552665 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-kube-api-access-hcr6t" (OuterVolumeSpecName: "kube-api-access-hcr6t") pod "7bcbe498-d2bb-4ad5-87dd-f2896380acfe" (UID: "7bcbe498-d2bb-4ad5-87dd-f2896380acfe"). InnerVolumeSpecName "kube-api-access-hcr6t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.610366 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hcr6t\" (UniqueName: \"kubernetes.io/projected/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-kube-api-access-hcr6t\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.610388 4650 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-httpd-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.639163 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7bcbe498-d2bb-4ad5-87dd-f2896380acfe" (UID: "7bcbe498-d2bb-4ad5-87dd-f2896380acfe"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.657421 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7bcbe498-d2bb-4ad5-87dd-f2896380acfe" (UID: "7bcbe498-d2bb-4ad5-87dd-f2896380acfe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.665821 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-config" (OuterVolumeSpecName: "config") pod "7bcbe498-d2bb-4ad5-87dd-f2896380acfe" (UID: "7bcbe498-d2bb-4ad5-87dd-f2896380acfe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.712987 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.713018 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.713057 4650 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.741155 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "7bcbe498-d2bb-4ad5-87dd-f2896380acfe" (UID: "7bcbe498-d2bb-4ad5-87dd-f2896380acfe"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.752112 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7bcbe498-d2bb-4ad5-87dd-f2896380acfe" (UID: "7bcbe498-d2bb-4ad5-87dd-f2896380acfe"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.768411 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.831337 4650 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.831380 4650 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bcbe498-d2bb-4ad5-87dd-f2896380acfe-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.934069 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-config-data\") pod \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.934112 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-combined-ca-bundle\") pod \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.934177 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/50d935fc-fa90-4b83-a7d8-d9175d3e9160-log-httpd\") pod \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.934536 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmrct\" (UniqueName: \"kubernetes.io/projected/50d935fc-fa90-4b83-a7d8-d9175d3e9160-kube-api-access-tmrct\") pod \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.934659 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50d935fc-fa90-4b83-a7d8-d9175d3e9160-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "50d935fc-fa90-4b83-a7d8-d9175d3e9160" (UID: "50d935fc-fa90-4b83-a7d8-d9175d3e9160"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.934860 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-sg-core-conf-yaml\") pod \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.934940 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/50d935fc-fa90-4b83-a7d8-d9175d3e9160-run-httpd\") pod \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.934977 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-scripts\") pod \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\" (UID: \"50d935fc-fa90-4b83-a7d8-d9175d3e9160\") " Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.935392 4650 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/50d935fc-fa90-4b83-a7d8-d9175d3e9160-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.939175 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50d935fc-fa90-4b83-a7d8-d9175d3e9160-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "50d935fc-fa90-4b83-a7d8-d9175d3e9160" (UID: "50d935fc-fa90-4b83-a7d8-d9175d3e9160"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.941699 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-scripts" (OuterVolumeSpecName: "scripts") pod "50d935fc-fa90-4b83-a7d8-d9175d3e9160" (UID: "50d935fc-fa90-4b83-a7d8-d9175d3e9160"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:11 crc kubenswrapper[4650]: I0201 07:43:11.952630 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50d935fc-fa90-4b83-a7d8-d9175d3e9160-kube-api-access-tmrct" (OuterVolumeSpecName: "kube-api-access-tmrct") pod "50d935fc-fa90-4b83-a7d8-d9175d3e9160" (UID: "50d935fc-fa90-4b83-a7d8-d9175d3e9160"). InnerVolumeSpecName "kube-api-access-tmrct". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.037624 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmrct\" (UniqueName: \"kubernetes.io/projected/50d935fc-fa90-4b83-a7d8-d9175d3e9160-kube-api-access-tmrct\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.037648 4650 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/50d935fc-fa90-4b83-a7d8-d9175d3e9160-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.037657 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.053359 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "50d935fc-fa90-4b83-a7d8-d9175d3e9160" (UID: "50d935fc-fa90-4b83-a7d8-d9175d3e9160"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.081216 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "50d935fc-fa90-4b83-a7d8-d9175d3e9160" (UID: "50d935fc-fa90-4b83-a7d8-d9175d3e9160"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.111975 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-config-data" (OuterVolumeSpecName: "config-data") pod "50d935fc-fa90-4b83-a7d8-d9175d3e9160" (UID: "50d935fc-fa90-4b83-a7d8-d9175d3e9160"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.140006 4650 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.140048 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.140060 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50d935fc-fa90-4b83-a7d8-d9175d3e9160-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:12 crc kubenswrapper[4650]: E0201 07:43:12.215815 4650 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7bcbe498_d2bb_4ad5_87dd_f2896380acfe.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7bcbe498_d2bb_4ad5_87dd_f2896380acfe.slice/crio-cb2f8af6d7f431c3f8fcdccb2f2945cbb5fed4481dbae22bd46f8b30ef76af31\": RecentStats: unable to find data in memory cache]" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.294754 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"8fe9f906bb9af0f6e1c2ba42ba5f4922543588a5973cdd095151c3b67117ce78"} Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.294804 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"4036202ac516e18523b1a52ae487b8712f118510898928b033522621b7f15ee2"} Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.295121 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.303097 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.310310 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"50d935fc-fa90-4b83-a7d8-d9175d3e9160","Type":"ContainerDied","Data":"2e7980ba90fcf3e4f4ecf37bc42352fd94ce94c7337018c3044fe3e0acf35c1f"} Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.310362 4650 scope.go:117] "RemoveContainer" containerID="7c758a1ac49a68fd9113195937c7ef256dfee8485bf9413a47c4de41e8990d59" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.310521 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.324699 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-78c5fb6df7-xcnvd" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.325595 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"eb39e44a-8146-4d73-bee6-6f5a65ccd5e4","Type":"ContainerStarted","Data":"f0223f835b8c190469d87818cf818e40be4f80e93cbfe3943b69b049681d78b3"} Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.342140 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.54467297 podStartE2EDuration="21.342122017s" podCreationTimestamp="2026-02-01 07:42:51 +0000 UTC" firstStartedPulling="2026-02-01 07:42:52.649663882 +0000 UTC m=+1171.372762127" lastFinishedPulling="2026-02-01 07:43:11.447112929 +0000 UTC m=+1190.170211174" observedRunningTime="2026-02-01 07:43:12.340176586 +0000 UTC m=+1191.063274851" watchObservedRunningTime="2026-02-01 07:43:12.342122017 +0000 UTC m=+1191.065220262" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.372171 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.372310 4650 scope.go:117] "RemoveContainer" containerID="2e73fae6227047d6ed8bde8b98fade08ac739d44e7337db0957e7722efc2ce51" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.381675 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.397797 4650 scope.go:117] "RemoveContainer" containerID="ec5168ca941e1f875f4921e3bddbdd2c01bd41a9a4eaa9ed478687fa1d2640fb" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.419477 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:12 crc kubenswrapper[4650]: E0201 07:43:12.419893 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="sg-core" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.419921 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="sg-core" Feb 01 07:43:12 crc kubenswrapper[4650]: E0201 07:43:12.419936 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="ceilometer-notification-agent" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.419942 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="ceilometer-notification-agent" Feb 01 07:43:12 crc kubenswrapper[4650]: E0201 07:43:12.419951 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="ceilometer-central-agent" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.419958 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="ceilometer-central-agent" Feb 01 07:43:12 crc kubenswrapper[4650]: E0201 07:43:12.419970 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="proxy-httpd" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.419977 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="proxy-httpd" Feb 01 07:43:12 crc kubenswrapper[4650]: E0201 07:43:12.419987 4650 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="7bcbe498-d2bb-4ad5-87dd-f2896380acfe" containerName="neutron-api" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.419992 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bcbe498-d2bb-4ad5-87dd-f2896380acfe" containerName="neutron-api" Feb 01 07:43:12 crc kubenswrapper[4650]: E0201 07:43:12.420008 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bcbe498-d2bb-4ad5-87dd-f2896380acfe" containerName="neutron-httpd" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.420014 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bcbe498-d2bb-4ad5-87dd-f2896380acfe" containerName="neutron-httpd" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.420204 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="ceilometer-central-agent" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.420222 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="ceilometer-notification-agent" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.420230 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bcbe498-d2bb-4ad5-87dd-f2896380acfe" containerName="neutron-httpd" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.420239 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="proxy-httpd" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.420249 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" containerName="sg-core" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.420259 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bcbe498-d2bb-4ad5-87dd-f2896380acfe" containerName="neutron-api" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.421806 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.424612 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.424660 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.449135 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-78c5fb6df7-xcnvd"] Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.456202 4650 scope.go:117] "RemoveContainer" containerID="59e2373a5e220111d67e66e42154c25be1646a5242cf503be76abf56f53d12f2" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.468274 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-78c5fb6df7-xcnvd"] Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.489572 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.559196 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.559513 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04d8e4b2-cc51-4600-b27f-01b215ff5279-run-httpd\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.559554 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rk4qr\" (UniqueName: \"kubernetes.io/projected/04d8e4b2-cc51-4600-b27f-01b215ff5279-kube-api-access-rk4qr\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.559870 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.559895 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04d8e4b2-cc51-4600-b27f-01b215ff5279-log-httpd\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.559917 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-scripts\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.559940 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-config-data\") pod 
\"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.661219 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rk4qr\" (UniqueName: \"kubernetes.io/projected/04d8e4b2-cc51-4600-b27f-01b215ff5279-kube-api-access-rk4qr\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.661288 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.661311 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04d8e4b2-cc51-4600-b27f-01b215ff5279-log-httpd\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.661333 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-scripts\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.661357 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-config-data\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.661434 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.661465 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04d8e4b2-cc51-4600-b27f-01b215ff5279-run-httpd\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.661876 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04d8e4b2-cc51-4600-b27f-01b215ff5279-run-httpd\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.662167 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04d8e4b2-cc51-4600-b27f-01b215ff5279-log-httpd\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.669566 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: 
\"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.671698 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-config-data\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.679893 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.680564 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rk4qr\" (UniqueName: \"kubernetes.io/projected/04d8e4b2-cc51-4600-b27f-01b215ff5279-kube-api-access-rk4qr\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.684462 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-scripts\") pod \"ceilometer-0\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " pod="openstack/ceilometer-0" Feb 01 07:43:12 crc kubenswrapper[4650]: I0201 07:43:12.749869 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:13 crc kubenswrapper[4650]: I0201 07:43:13.270046 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:13 crc kubenswrapper[4650]: I0201 07:43:13.337823 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="8fe9f906bb9af0f6e1c2ba42ba5f4922543588a5973cdd095151c3b67117ce78" exitCode=1 Feb 01 07:43:13 crc kubenswrapper[4650]: I0201 07:43:13.338017 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"8fe9f906bb9af0f6e1c2ba42ba5f4922543588a5973cdd095151c3b67117ce78"} Feb 01 07:43:13 crc kubenswrapper[4650]: I0201 07:43:13.338140 4650 scope.go:117] "RemoveContainer" containerID="78ea0cb41121619435cbf7540e5d3f25514cf2de97603c865ab3c56d1fdc2457" Feb 01 07:43:13 crc kubenswrapper[4650]: I0201 07:43:13.338280 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:43:13 crc kubenswrapper[4650]: I0201 07:43:13.338764 4650 scope.go:117] "RemoveContainer" containerID="8fe9f906bb9af0f6e1c2ba42ba5f4922543588a5973cdd095151c3b67117ce78" Feb 01 07:43:13 crc kubenswrapper[4650]: E0201 07:43:13.338967 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:43:13 crc kubenswrapper[4650]: I0201 07:43:13.342970 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"dd2aba78-ffb9-4c24-bd46-0ecc5c93217e","Type":"ContainerStarted","Data":"04bce57b6c6b1e149b31256fe0e52889b5ece6dddcfebaabc5032cd31b5b8a21"} Feb 01 07:43:13 crc kubenswrapper[4650]: I0201 07:43:13.343103 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Feb 01 07:43:13 crc kubenswrapper[4650]: I0201 07:43:13.344146 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04d8e4b2-cc51-4600-b27f-01b215ff5279","Type":"ContainerStarted","Data":"e141b064f47847c16e3c3bcc078ec114f64b53f7fbe7f4efb75c011da1bbfdce"} Feb 01 07:43:13 crc kubenswrapper[4650]: I0201 07:43:13.426430 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=11.426403013 podStartE2EDuration="11.426403013s" podCreationTimestamp="2026-02-01 07:43:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:43:13.42252431 +0000 UTC m=+1192.145622555" watchObservedRunningTime="2026-02-01 07:43:13.426403013 +0000 UTC m=+1192.149501258" Feb 01 07:43:13 crc kubenswrapper[4650]: I0201 07:43:13.983796 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50d935fc-fa90-4b83-a7d8-d9175d3e9160" path="/var/lib/kubelet/pods/50d935fc-fa90-4b83-a7d8-d9175d3e9160/volumes" Feb 01 07:43:13 crc kubenswrapper[4650]: I0201 07:43:13.984715 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bcbe498-d2bb-4ad5-87dd-f2896380acfe" path="/var/lib/kubelet/pods/7bcbe498-d2bb-4ad5-87dd-f2896380acfe/volumes" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.380366 4650 generic.go:334] "Generic (PLEG): container finished" podID="ae091b31-cc44-44f4-a374-6373c9501292" containerID="16e1f0a1172f0b931837eee6c283685a5230303871b51476c961200d508c6bd7" exitCode=0 Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.380781 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-87f9c5788-4s9lh" event={"ID":"ae091b31-cc44-44f4-a374-6373c9501292","Type":"ContainerDied","Data":"16e1f0a1172f0b931837eee6c283685a5230303871b51476c961200d508c6bd7"} Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.380817 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-87f9c5788-4s9lh" event={"ID":"ae091b31-cc44-44f4-a374-6373c9501292","Type":"ContainerDied","Data":"dd45ddcb48430dde063fa9684da7e72be8afb55c3933f96efd717d57199810f0"} Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.380831 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd45ddcb48430dde063fa9684da7e72be8afb55c3933f96efd717d57199810f0" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.387940 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04d8e4b2-cc51-4600-b27f-01b215ff5279","Type":"ContainerStarted","Data":"191ac6aa5ff539cfcde35cc955c93c529c5665621eaf3f7d46c4208afdf9f5e4"} Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.392933 4650 scope.go:117] "RemoveContainer" containerID="8fe9f906bb9af0f6e1c2ba42ba5f4922543588a5973cdd095151c3b67117ce78" Feb 01 07:43:14 crc kubenswrapper[4650]: E0201 07:43:14.393168 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-server 
pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.424252 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.519722 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-public-tls-certs\") pod \"ae091b31-cc44-44f4-a374-6373c9501292\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.519791 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-config-data\") pod \"ae091b31-cc44-44f4-a374-6373c9501292\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.519824 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae091b31-cc44-44f4-a374-6373c9501292-logs\") pod \"ae091b31-cc44-44f4-a374-6373c9501292\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.519906 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-internal-tls-certs\") pod \"ae091b31-cc44-44f4-a374-6373c9501292\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.519961 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-combined-ca-bundle\") pod \"ae091b31-cc44-44f4-a374-6373c9501292\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.519978 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-scripts\") pod \"ae091b31-cc44-44f4-a374-6373c9501292\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.519996 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cbblt\" (UniqueName: \"kubernetes.io/projected/ae091b31-cc44-44f4-a374-6373c9501292-kube-api-access-cbblt\") pod \"ae091b31-cc44-44f4-a374-6373c9501292\" (UID: \"ae091b31-cc44-44f4-a374-6373c9501292\") " Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.523841 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae091b31-cc44-44f4-a374-6373c9501292-logs" (OuterVolumeSpecName: "logs") pod "ae091b31-cc44-44f4-a374-6373c9501292" (UID: "ae091b31-cc44-44f4-a374-6373c9501292"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.529342 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-scripts" (OuterVolumeSpecName: "scripts") pod "ae091b31-cc44-44f4-a374-6373c9501292" (UID: "ae091b31-cc44-44f4-a374-6373c9501292"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.530758 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae091b31-cc44-44f4-a374-6373c9501292-kube-api-access-cbblt" (OuterVolumeSpecName: "kube-api-access-cbblt") pod "ae091b31-cc44-44f4-a374-6373c9501292" (UID: "ae091b31-cc44-44f4-a374-6373c9501292"). InnerVolumeSpecName "kube-api-access-cbblt". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.627479 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae091b31-cc44-44f4-a374-6373c9501292-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.627512 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.627529 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cbblt\" (UniqueName: \"kubernetes.io/projected/ae091b31-cc44-44f4-a374-6373c9501292-kube-api-access-cbblt\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.716299 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-config-data" (OuterVolumeSpecName: "config-data") pod "ae091b31-cc44-44f4-a374-6373c9501292" (UID: "ae091b31-cc44-44f4-a374-6373c9501292"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.720169 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ae091b31-cc44-44f4-a374-6373c9501292" (UID: "ae091b31-cc44-44f4-a374-6373c9501292"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.729234 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.729273 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.741856 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ae091b31-cc44-44f4-a374-6373c9501292" (UID: "ae091b31-cc44-44f4-a374-6373c9501292"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.761156 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ae091b31-cc44-44f4-a374-6373c9501292" (UID: "ae091b31-cc44-44f4-a374-6373c9501292"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.830434 4650 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:14 crc kubenswrapper[4650]: I0201 07:43:14.830716 4650 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae091b31-cc44-44f4-a374-6373c9501292-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:15 crc kubenswrapper[4650]: I0201 07:43:15.400741 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-87f9c5788-4s9lh" Feb 01 07:43:15 crc kubenswrapper[4650]: I0201 07:43:15.401410 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04d8e4b2-cc51-4600-b27f-01b215ff5279","Type":"ContainerStarted","Data":"7c358ff5a88c7d9836d0e3fd955c54de5fdfc8d171acefc5388644db769c9fb8"} Feb 01 07:43:15 crc kubenswrapper[4650]: I0201 07:43:15.401460 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04d8e4b2-cc51-4600-b27f-01b215ff5279","Type":"ContainerStarted","Data":"0c61c47c5086c9cbd8b038b77c0a39c110aa57f3402a5cc37d628b8ee61801a6"} Feb 01 07:43:15 crc kubenswrapper[4650]: I0201 07:43:15.432934 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-87f9c5788-4s9lh"] Feb 01 07:43:15 crc kubenswrapper[4650]: I0201 07:43:15.439968 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-87f9c5788-4s9lh"] Feb 01 07:43:15 crc kubenswrapper[4650]: I0201 07:43:15.799902 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:43:15 crc kubenswrapper[4650]: I0201 07:43:15.800600 4650 scope.go:117] "RemoveContainer" containerID="8fe9f906bb9af0f6e1c2ba42ba5f4922543588a5973cdd095151c3b67117ce78" Feb 01 07:43:15 crc kubenswrapper[4650]: E0201 07:43:15.800940 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:43:15 crc kubenswrapper[4650]: I0201 07:43:15.993804 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae091b31-cc44-44f4-a374-6373c9501292" path="/var/lib/kubelet/pods/ae091b31-cc44-44f4-a374-6373c9501292/volumes" Feb 01 07:43:16 crc kubenswrapper[4650]: I0201 07:43:16.710803 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:16 crc kubenswrapper[4650]: I0201 07:43:16.933757 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:43:17 crc kubenswrapper[4650]: 
I0201 07:43:17.001691 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:43:18 crc kubenswrapper[4650]: I0201 07:43:18.431694 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04d8e4b2-cc51-4600-b27f-01b215ff5279","Type":"ContainerStarted","Data":"a586bae7607868db567a76ff2d0d551acbdedb69c8c7277c1732873ce40a6da8"} Feb 01 07:43:18 crc kubenswrapper[4650]: I0201 07:43:18.431954 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="ceilometer-notification-agent" containerID="cri-o://0c61c47c5086c9cbd8b038b77c0a39c110aa57f3402a5cc37d628b8ee61801a6" gracePeriod=30 Feb 01 07:43:18 crc kubenswrapper[4650]: I0201 07:43:18.431975 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="proxy-httpd" containerID="cri-o://a586bae7607868db567a76ff2d0d551acbdedb69c8c7277c1732873ce40a6da8" gracePeriod=30 Feb 01 07:43:18 crc kubenswrapper[4650]: I0201 07:43:18.431985 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="sg-core" containerID="cri-o://7c358ff5a88c7d9836d0e3fd955c54de5fdfc8d171acefc5388644db769c9fb8" gracePeriod=30 Feb 01 07:43:18 crc kubenswrapper[4650]: I0201 07:43:18.432202 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 01 07:43:18 crc kubenswrapper[4650]: I0201 07:43:18.431901 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="ceilometer-central-agent" containerID="cri-o://191ac6aa5ff539cfcde35cc955c93c529c5665621eaf3f7d46c4208afdf9f5e4" gracePeriod=30 Feb 01 07:43:18 crc kubenswrapper[4650]: I0201 07:43:18.462645 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.532151653 podStartE2EDuration="6.462627479s" podCreationTimestamp="2026-02-01 07:43:12 +0000 UTC" firstStartedPulling="2026-02-01 07:43:13.273741969 +0000 UTC m=+1191.996840214" lastFinishedPulling="2026-02-01 07:43:17.204217795 +0000 UTC m=+1195.927316040" observedRunningTime="2026-02-01 07:43:18.459167418 +0000 UTC m=+1197.182265673" watchObservedRunningTime="2026-02-01 07:43:18.462627479 +0000 UTC m=+1197.185725714" Feb 01 07:43:18 crc kubenswrapper[4650]: I0201 07:43:18.808951 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.021573 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-79fd8b5f84-qg9cv" Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.135073 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5b4d45c6bd-qsdbt"] Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.135283 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5b4d45c6bd-qsdbt" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon-log" 
containerID="cri-o://121bf0c27d3cd2492d3454ae6a47181d459961964a1aefdd883d489176849870" gracePeriod=30 Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.135697 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5b4d45c6bd-qsdbt" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" containerID="cri-o://a451bbea895b092fac95434c169fa8820f75c4dcfbd374cb478929932b3b5264" gracePeriod=30 Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.143139 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5b4d45c6bd-qsdbt" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.150:8443/dashboard/auth/login/?next=/dashboard/\": EOF" Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.441482 4650 generic.go:334] "Generic (PLEG): container finished" podID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerID="a586bae7607868db567a76ff2d0d551acbdedb69c8c7277c1732873ce40a6da8" exitCode=0 Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.441518 4650 generic.go:334] "Generic (PLEG): container finished" podID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerID="7c358ff5a88c7d9836d0e3fd955c54de5fdfc8d171acefc5388644db769c9fb8" exitCode=2 Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.441528 4650 generic.go:334] "Generic (PLEG): container finished" podID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerID="0c61c47c5086c9cbd8b038b77c0a39c110aa57f3402a5cc37d628b8ee61801a6" exitCode=0 Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.441548 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04d8e4b2-cc51-4600-b27f-01b215ff5279","Type":"ContainerDied","Data":"a586bae7607868db567a76ff2d0d551acbdedb69c8c7277c1732873ce40a6da8"} Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.441574 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04d8e4b2-cc51-4600-b27f-01b215ff5279","Type":"ContainerDied","Data":"7c358ff5a88c7d9836d0e3fd955c54de5fdfc8d171acefc5388644db769c9fb8"} Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.441583 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04d8e4b2-cc51-4600-b27f-01b215ff5279","Type":"ContainerDied","Data":"0c61c47c5086c9cbd8b038b77c0a39c110aa57f3402a5cc37d628b8ee61801a6"} Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.741270 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.808144 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.985424 4650 scope.go:117] "RemoveContainer" containerID="b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492" Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.985815 4650 scope.go:117] "RemoveContainer" containerID="dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31" Feb 01 07:43:19 crc kubenswrapper[4650]: I0201 07:43:19.985938 4650 scope.go:117] "RemoveContainer" containerID="0762ec2515f934e543828087282d638c3ace8afd252c27ec2209aca61ed63e83" Feb 01 07:43:19 crc kubenswrapper[4650]: E0201 07:43:19.986439 4650 pod_workers.go:1301] 
"Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.269805 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.453003 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-config-data\") pod \"04d8e4b2-cc51-4600-b27f-01b215ff5279\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.453057 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-scripts\") pod \"04d8e4b2-cc51-4600-b27f-01b215ff5279\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.453116 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-combined-ca-bundle\") pod \"04d8e4b2-cc51-4600-b27f-01b215ff5279\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.453147 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04d8e4b2-cc51-4600-b27f-01b215ff5279-log-httpd\") pod \"04d8e4b2-cc51-4600-b27f-01b215ff5279\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.453172 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rk4qr\" (UniqueName: \"kubernetes.io/projected/04d8e4b2-cc51-4600-b27f-01b215ff5279-kube-api-access-rk4qr\") pod \"04d8e4b2-cc51-4600-b27f-01b215ff5279\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.453201 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04d8e4b2-cc51-4600-b27f-01b215ff5279-run-httpd\") pod \"04d8e4b2-cc51-4600-b27f-01b215ff5279\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.453303 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-sg-core-conf-yaml\") pod \"04d8e4b2-cc51-4600-b27f-01b215ff5279\" (UID: \"04d8e4b2-cc51-4600-b27f-01b215ff5279\") " Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.453712 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/04d8e4b2-cc51-4600-b27f-01b215ff5279-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "04d8e4b2-cc51-4600-b27f-01b215ff5279" (UID: "04d8e4b2-cc51-4600-b27f-01b215ff5279"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.453803 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04d8e4b2-cc51-4600-b27f-01b215ff5279-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "04d8e4b2-cc51-4600-b27f-01b215ff5279" (UID: "04d8e4b2-cc51-4600-b27f-01b215ff5279"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.470136 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04d8e4b2-cc51-4600-b27f-01b215ff5279-kube-api-access-rk4qr" (OuterVolumeSpecName: "kube-api-access-rk4qr") pod "04d8e4b2-cc51-4600-b27f-01b215ff5279" (UID: "04d8e4b2-cc51-4600-b27f-01b215ff5279"). InnerVolumeSpecName "kube-api-access-rk4qr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.470269 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-scripts" (OuterVolumeSpecName: "scripts") pod "04d8e4b2-cc51-4600-b27f-01b215ff5279" (UID: "04d8e4b2-cc51-4600-b27f-01b215ff5279"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.492406 4650 generic.go:334] "Generic (PLEG): container finished" podID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerID="191ac6aa5ff539cfcde35cc955c93c529c5665621eaf3f7d46c4208afdf9f5e4" exitCode=0 Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.492448 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04d8e4b2-cc51-4600-b27f-01b215ff5279","Type":"ContainerDied","Data":"191ac6aa5ff539cfcde35cc955c93c529c5665621eaf3f7d46c4208afdf9f5e4"} Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.492472 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04d8e4b2-cc51-4600-b27f-01b215ff5279","Type":"ContainerDied","Data":"e141b064f47847c16e3c3bcc078ec114f64b53f7fbe7f4efb75c011da1bbfdce"} Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.492489 4650 scope.go:117] "RemoveContainer" containerID="a586bae7607868db567a76ff2d0d551acbdedb69c8c7277c1732873ce40a6da8" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.492615 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.501246 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "04d8e4b2-cc51-4600-b27f-01b215ff5279" (UID: "04d8e4b2-cc51-4600-b27f-01b215ff5279"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.557002 4650 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.557053 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.557063 4650 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04d8e4b2-cc51-4600-b27f-01b215ff5279-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.557071 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rk4qr\" (UniqueName: \"kubernetes.io/projected/04d8e4b2-cc51-4600-b27f-01b215ff5279-kube-api-access-rk4qr\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.557080 4650 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04d8e4b2-cc51-4600-b27f-01b215ff5279-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.580230 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-86k28"] Feb 01 07:43:21 crc kubenswrapper[4650]: E0201 07:43:21.580628 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae091b31-cc44-44f4-a374-6373c9501292" containerName="placement-log" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.580639 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae091b31-cc44-44f4-a374-6373c9501292" containerName="placement-log" Feb 01 07:43:21 crc kubenswrapper[4650]: E0201 07:43:21.580655 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="ceilometer-notification-agent" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.580662 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="ceilometer-notification-agent" Feb 01 07:43:21 crc kubenswrapper[4650]: E0201 07:43:21.580678 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="ceilometer-central-agent" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.580684 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="ceilometer-central-agent" Feb 01 07:43:21 crc kubenswrapper[4650]: E0201 07:43:21.580701 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="sg-core" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.580707 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="sg-core" Feb 01 07:43:21 crc kubenswrapper[4650]: E0201 07:43:21.580716 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="proxy-httpd" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.580722 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="proxy-httpd" Feb 01 07:43:21 crc 
kubenswrapper[4650]: E0201 07:43:21.580734 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae091b31-cc44-44f4-a374-6373c9501292" containerName="placement-api" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.580739 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae091b31-cc44-44f4-a374-6373c9501292" containerName="placement-api" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.580898 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="proxy-httpd" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.580909 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="ceilometer-notification-agent" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.580922 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae091b31-cc44-44f4-a374-6373c9501292" containerName="placement-log" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.580933 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="ceilometer-central-agent" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.580948 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" containerName="sg-core" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.580960 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae091b31-cc44-44f4-a374-6373c9501292" containerName="placement-api" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.581559 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-86k28" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.584899 4650 scope.go:117] "RemoveContainer" containerID="7c358ff5a88c7d9836d0e3fd955c54de5fdfc8d171acefc5388644db769c9fb8" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.599815 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-config-data" (OuterVolumeSpecName: "config-data") pod "04d8e4b2-cc51-4600-b27f-01b215ff5279" (UID: "04d8e4b2-cc51-4600-b27f-01b215ff5279"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.600498 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-86k28"] Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.629869 4650 scope.go:117] "RemoveContainer" containerID="0c61c47c5086c9cbd8b038b77c0a39c110aa57f3402a5cc37d628b8ee61801a6" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.646115 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-9wwvn"] Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.646229 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "04d8e4b2-cc51-4600-b27f-01b215ff5279" (UID: "04d8e4b2-cc51-4600-b27f-01b215ff5279"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.652423 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-9wwvn" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.658325 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.658358 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04d8e4b2-cc51-4600-b27f-01b215ff5279-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.689007 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-d87a-account-create-update-gd2x9"] Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.690214 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-d87a-account-create-update-gd2x9" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.691953 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.700183 4650 scope.go:117] "RemoveContainer" containerID="191ac6aa5ff539cfcde35cc955c93c529c5665621eaf3f7d46c4208afdf9f5e4" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.705779 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-9wwvn"] Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.737192 4650 scope.go:117] "RemoveContainer" containerID="a586bae7607868db567a76ff2d0d551acbdedb69c8c7277c1732873ce40a6da8" Feb 01 07:43:21 crc kubenswrapper[4650]: E0201 07:43:21.739611 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a586bae7607868db567a76ff2d0d551acbdedb69c8c7277c1732873ce40a6da8\": container with ID starting with a586bae7607868db567a76ff2d0d551acbdedb69c8c7277c1732873ce40a6da8 not found: ID does not exist" containerID="a586bae7607868db567a76ff2d0d551acbdedb69c8c7277c1732873ce40a6da8" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.739648 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a586bae7607868db567a76ff2d0d551acbdedb69c8c7277c1732873ce40a6da8"} err="failed to get container status \"a586bae7607868db567a76ff2d0d551acbdedb69c8c7277c1732873ce40a6da8\": rpc error: code = NotFound desc = could not find container \"a586bae7607868db567a76ff2d0d551acbdedb69c8c7277c1732873ce40a6da8\": container with ID starting with a586bae7607868db567a76ff2d0d551acbdedb69c8c7277c1732873ce40a6da8 not found: ID does not exist" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.739672 4650 scope.go:117] "RemoveContainer" containerID="7c358ff5a88c7d9836d0e3fd955c54de5fdfc8d171acefc5388644db769c9fb8" Feb 01 07:43:21 crc kubenswrapper[4650]: E0201 07:43:21.740080 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c358ff5a88c7d9836d0e3fd955c54de5fdfc8d171acefc5388644db769c9fb8\": container with ID starting with 7c358ff5a88c7d9836d0e3fd955c54de5fdfc8d171acefc5388644db769c9fb8 not found: ID does not exist" containerID="7c358ff5a88c7d9836d0e3fd955c54de5fdfc8d171acefc5388644db769c9fb8" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.740107 4650 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"7c358ff5a88c7d9836d0e3fd955c54de5fdfc8d171acefc5388644db769c9fb8"} err="failed to get container status \"7c358ff5a88c7d9836d0e3fd955c54de5fdfc8d171acefc5388644db769c9fb8\": rpc error: code = NotFound desc = could not find container \"7c358ff5a88c7d9836d0e3fd955c54de5fdfc8d171acefc5388644db769c9fb8\": container with ID starting with 7c358ff5a88c7d9836d0e3fd955c54de5fdfc8d171acefc5388644db769c9fb8 not found: ID does not exist" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.740123 4650 scope.go:117] "RemoveContainer" containerID="0c61c47c5086c9cbd8b038b77c0a39c110aa57f3402a5cc37d628b8ee61801a6" Feb 01 07:43:21 crc kubenswrapper[4650]: E0201 07:43:21.744214 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c61c47c5086c9cbd8b038b77c0a39c110aa57f3402a5cc37d628b8ee61801a6\": container with ID starting with 0c61c47c5086c9cbd8b038b77c0a39c110aa57f3402a5cc37d628b8ee61801a6 not found: ID does not exist" containerID="0c61c47c5086c9cbd8b038b77c0a39c110aa57f3402a5cc37d628b8ee61801a6" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.744265 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c61c47c5086c9cbd8b038b77c0a39c110aa57f3402a5cc37d628b8ee61801a6"} err="failed to get container status \"0c61c47c5086c9cbd8b038b77c0a39c110aa57f3402a5cc37d628b8ee61801a6\": rpc error: code = NotFound desc = could not find container \"0c61c47c5086c9cbd8b038b77c0a39c110aa57f3402a5cc37d628b8ee61801a6\": container with ID starting with 0c61c47c5086c9cbd8b038b77c0a39c110aa57f3402a5cc37d628b8ee61801a6 not found: ID does not exist" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.744292 4650 scope.go:117] "RemoveContainer" containerID="191ac6aa5ff539cfcde35cc955c93c529c5665621eaf3f7d46c4208afdf9f5e4" Feb 01 07:43:21 crc kubenswrapper[4650]: E0201 07:43:21.747056 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"191ac6aa5ff539cfcde35cc955c93c529c5665621eaf3f7d46c4208afdf9f5e4\": container with ID starting with 191ac6aa5ff539cfcde35cc955c93c529c5665621eaf3f7d46c4208afdf9f5e4 not found: ID does not exist" containerID="191ac6aa5ff539cfcde35cc955c93c529c5665621eaf3f7d46c4208afdf9f5e4" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.747073 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"191ac6aa5ff539cfcde35cc955c93c529c5665621eaf3f7d46c4208afdf9f5e4"} err="failed to get container status \"191ac6aa5ff539cfcde35cc955c93c529c5665621eaf3f7d46c4208afdf9f5e4\": rpc error: code = NotFound desc = could not find container \"191ac6aa5ff539cfcde35cc955c93c529c5665621eaf3f7d46c4208afdf9f5e4\": container with ID starting with 191ac6aa5ff539cfcde35cc955c93c529c5665621eaf3f7d46c4208afdf9f5e4 not found: ID does not exist" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.759819 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c7dfd53-9a47-402b-951c-e785181e81a2-operator-scripts\") pod \"nova-cell0-db-create-9wwvn\" (UID: \"6c7dfd53-9a47-402b-951c-e785181e81a2\") " pod="openstack/nova-cell0-db-create-9wwvn" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.759936 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7m29d\" (UniqueName: 
\"kubernetes.io/projected/6c7dfd53-9a47-402b-951c-e785181e81a2-kube-api-access-7m29d\") pod \"nova-cell0-db-create-9wwvn\" (UID: \"6c7dfd53-9a47-402b-951c-e785181e81a2\") " pod="openstack/nova-cell0-db-create-9wwvn" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.760020 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6d6109a-7337-4d9b-bb82-b0f778d843c7-operator-scripts\") pod \"nova-api-db-create-86k28\" (UID: \"a6d6109a-7337-4d9b-bb82-b0f778d843c7\") " pod="openstack/nova-api-db-create-86k28" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.760121 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7wz7\" (UniqueName: \"kubernetes.io/projected/a6d6109a-7337-4d9b-bb82-b0f778d843c7-kube-api-access-l7wz7\") pod \"nova-api-db-create-86k28\" (UID: \"a6d6109a-7337-4d9b-bb82-b0f778d843c7\") " pod="openstack/nova-api-db-create-86k28" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.760488 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-d87a-account-create-update-gd2x9"] Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.810388 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.826376 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.840112 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.850641 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-85ntk"] Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.856760 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-85ntk" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.862358 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c7dfd53-9a47-402b-951c-e785181e81a2-operator-scripts\") pod \"nova-cell0-db-create-9wwvn\" (UID: \"6c7dfd53-9a47-402b-951c-e785181e81a2\") " pod="openstack/nova-cell0-db-create-9wwvn" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.862415 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b50b989-57dd-4a03-99ad-c46a180a3136-operator-scripts\") pod \"nova-api-d87a-account-create-update-gd2x9\" (UID: \"4b50b989-57dd-4a03-99ad-c46a180a3136\") " pod="openstack/nova-api-d87a-account-create-update-gd2x9" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.862445 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7m29d\" (UniqueName: \"kubernetes.io/projected/6c7dfd53-9a47-402b-951c-e785181e81a2-kube-api-access-7m29d\") pod \"nova-cell0-db-create-9wwvn\" (UID: \"6c7dfd53-9a47-402b-951c-e785181e81a2\") " pod="openstack/nova-cell0-db-create-9wwvn" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.862518 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6d6109a-7337-4d9b-bb82-b0f778d843c7-operator-scripts\") pod \"nova-api-db-create-86k28\" (UID: \"a6d6109a-7337-4d9b-bb82-b0f778d843c7\") " pod="openstack/nova-api-db-create-86k28" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.862537 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngxtc\" (UniqueName: \"kubernetes.io/projected/4b50b989-57dd-4a03-99ad-c46a180a3136-kube-api-access-ngxtc\") pod \"nova-api-d87a-account-create-update-gd2x9\" (UID: \"4b50b989-57dd-4a03-99ad-c46a180a3136\") " pod="openstack/nova-api-d87a-account-create-update-gd2x9" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.862557 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7wz7\" (UniqueName: \"kubernetes.io/projected/a6d6109a-7337-4d9b-bb82-b0f778d843c7-kube-api-access-l7wz7\") pod \"nova-api-db-create-86k28\" (UID: \"a6d6109a-7337-4d9b-bb82-b0f778d843c7\") " pod="openstack/nova-api-db-create-86k28" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.863607 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c7dfd53-9a47-402b-951c-e785181e81a2-operator-scripts\") pod \"nova-cell0-db-create-9wwvn\" (UID: \"6c7dfd53-9a47-402b-951c-e785181e81a2\") " pod="openstack/nova-cell0-db-create-9wwvn" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.869622 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6d6109a-7337-4d9b-bb82-b0f778d843c7-operator-scripts\") pod \"nova-api-db-create-86k28\" (UID: \"a6d6109a-7337-4d9b-bb82-b0f778d843c7\") " pod="openstack/nova-api-db-create-86k28" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.875052 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-85ntk"] Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.899859 4650 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7m29d\" (UniqueName: \"kubernetes.io/projected/6c7dfd53-9a47-402b-951c-e785181e81a2-kube-api-access-7m29d\") pod \"nova-cell0-db-create-9wwvn\" (UID: \"6c7dfd53-9a47-402b-951c-e785181e81a2\") " pod="openstack/nova-cell0-db-create-9wwvn" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.913534 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-f8ab-account-create-update-htkmw"] Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.914722 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.920955 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.922020 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7wz7\" (UniqueName: \"kubernetes.io/projected/a6d6109a-7337-4d9b-bb82-b0f778d843c7-kube-api-access-l7wz7\") pod \"nova-api-db-create-86k28\" (UID: \"a6d6109a-7337-4d9b-bb82-b0f778d843c7\") " pod="openstack/nova-api-db-create-86k28" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.926455 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.926934 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-86k28" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.928542 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.940469 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.940764 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.944806 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f8ab-account-create-update-htkmw"] Feb 01 07:43:21 crc kubenswrapper[4650]: I0201 07:43:21.953147 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.018322 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckhz8\" (UniqueName: \"kubernetes.io/projected/3f2aa3a7-ab48-4686-b15a-4333b52302a2-kube-api-access-ckhz8\") pod \"nova-cell1-db-create-85ntk\" (UID: \"3f2aa3a7-ab48-4686-b15a-4333b52302a2\") " pod="openstack/nova-cell1-db-create-85ntk" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.018394 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b50b989-57dd-4a03-99ad-c46a180a3136-operator-scripts\") pod \"nova-api-d87a-account-create-update-gd2x9\" (UID: \"4b50b989-57dd-4a03-99ad-c46a180a3136\") " pod="openstack/nova-api-d87a-account-create-update-gd2x9" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.018574 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngxtc\" (UniqueName: \"kubernetes.io/projected/4b50b989-57dd-4a03-99ad-c46a180a3136-kube-api-access-ngxtc\") pod \"nova-api-d87a-account-create-update-gd2x9\" (UID: 
\"4b50b989-57dd-4a03-99ad-c46a180a3136\") " pod="openstack/nova-api-d87a-account-create-update-gd2x9" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.018635 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3f2aa3a7-ab48-4686-b15a-4333b52302a2-operator-scripts\") pod \"nova-cell1-db-create-85ntk\" (UID: \"3f2aa3a7-ab48-4686-b15a-4333b52302a2\") " pod="openstack/nova-cell1-db-create-85ntk" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.038861 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b50b989-57dd-4a03-99ad-c46a180a3136-operator-scripts\") pod \"nova-api-d87a-account-create-update-gd2x9\" (UID: \"4b50b989-57dd-4a03-99ad-c46a180a3136\") " pod="openstack/nova-api-d87a-account-create-update-gd2x9" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.060280 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-9wwvn" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.150174 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngxtc\" (UniqueName: \"kubernetes.io/projected/4b50b989-57dd-4a03-99ad-c46a180a3136-kube-api-access-ngxtc\") pod \"nova-api-d87a-account-create-update-gd2x9\" (UID: \"4b50b989-57dd-4a03-99ad-c46a180a3136\") " pod="openstack/nova-api-d87a-account-create-update-gd2x9" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.152776 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zklp\" (UniqueName: \"kubernetes.io/projected/676d06ca-f3de-4ce7-b782-0588cc433361-kube-api-access-7zklp\") pod \"nova-cell0-f8ab-account-create-update-htkmw\" (UID: \"676d06ca-f3de-4ce7-b782-0588cc433361\") " pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.152906 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.152950 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9kjq\" (UniqueName: \"kubernetes.io/projected/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-kube-api-access-b9kjq\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.152977 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.153069 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/676d06ca-f3de-4ce7-b782-0588cc433361-operator-scripts\") pod \"nova-cell0-f8ab-account-create-update-htkmw\" (UID: \"676d06ca-f3de-4ce7-b782-0588cc433361\") " 
pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.153128 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-log-httpd\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.153150 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3f2aa3a7-ab48-4686-b15a-4333b52302a2-operator-scripts\") pod \"nova-cell1-db-create-85ntk\" (UID: \"3f2aa3a7-ab48-4686-b15a-4333b52302a2\") " pod="openstack/nova-cell1-db-create-85ntk" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.153192 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckhz8\" (UniqueName: \"kubernetes.io/projected/3f2aa3a7-ab48-4686-b15a-4333b52302a2-kube-api-access-ckhz8\") pod \"nova-cell1-db-create-85ntk\" (UID: \"3f2aa3a7-ab48-4686-b15a-4333b52302a2\") " pod="openstack/nova-cell1-db-create-85ntk" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.153213 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-config-data\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.153263 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-run-httpd\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.153287 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-scripts\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.169201 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3f2aa3a7-ab48-4686-b15a-4333b52302a2-operator-scripts\") pod \"nova-cell1-db-create-85ntk\" (UID: \"3f2aa3a7-ab48-4686-b15a-4333b52302a2\") " pod="openstack/nova-cell1-db-create-85ntk" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.173062 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04d8e4b2-cc51-4600-b27f-01b215ff5279" path="/var/lib/kubelet/pods/04d8e4b2-cc51-4600-b27f-01b215ff5279/volumes" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.189971 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-b72f-account-create-update-29vrc"] Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.191451 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-b72f-account-create-update-29vrc" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.196442 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckhz8\" (UniqueName: \"kubernetes.io/projected/3f2aa3a7-ab48-4686-b15a-4333b52302a2-kube-api-access-ckhz8\") pod \"nova-cell1-db-create-85ntk\" (UID: \"3f2aa3a7-ab48-4686-b15a-4333b52302a2\") " pod="openstack/nova-cell1-db-create-85ntk" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.196967 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.219249 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-b72f-account-create-update-29vrc"] Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.254885 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-log-httpd\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.255213 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-config-data\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.255245 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-run-httpd\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.255264 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3803000-bb1d-4d78-a52c-a754d805449b-operator-scripts\") pod \"nova-cell1-b72f-account-create-update-29vrc\" (UID: \"d3803000-bb1d-4d78-a52c-a754d805449b\") " pod="openstack/nova-cell1-b72f-account-create-update-29vrc" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.255290 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-scripts\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.255303 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-log-httpd\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.255371 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zklp\" (UniqueName: \"kubernetes.io/projected/676d06ca-f3de-4ce7-b782-0588cc433361-kube-api-access-7zklp\") pod \"nova-cell0-f8ab-account-create-update-htkmw\" (UID: \"676d06ca-f3de-4ce7-b782-0588cc433361\") " pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.255417 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.255478 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9kjq\" (UniqueName: \"kubernetes.io/projected/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-kube-api-access-b9kjq\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.255497 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.255522 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrwjs\" (UniqueName: \"kubernetes.io/projected/d3803000-bb1d-4d78-a52c-a754d805449b-kube-api-access-jrwjs\") pod \"nova-cell1-b72f-account-create-update-29vrc\" (UID: \"d3803000-bb1d-4d78-a52c-a754d805449b\") " pod="openstack/nova-cell1-b72f-account-create-update-29vrc" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.255544 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/676d06ca-f3de-4ce7-b782-0588cc433361-operator-scripts\") pod \"nova-cell0-f8ab-account-create-update-htkmw\" (UID: \"676d06ca-f3de-4ce7-b782-0588cc433361\") " pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.256165 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/676d06ca-f3de-4ce7-b782-0588cc433361-operator-scripts\") pod \"nova-cell0-f8ab-account-create-update-htkmw\" (UID: \"676d06ca-f3de-4ce7-b782-0588cc433361\") " pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.256509 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-run-httpd\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.260773 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.269437 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-scripts\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.274047 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-config-data\") pod \"ceilometer-0\" (UID: 
\"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.274173 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zklp\" (UniqueName: \"kubernetes.io/projected/676d06ca-f3de-4ce7-b782-0588cc433361-kube-api-access-7zklp\") pod \"nova-cell0-f8ab-account-create-update-htkmw\" (UID: \"676d06ca-f3de-4ce7-b782-0588cc433361\") " pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.274404 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.278927 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9kjq\" (UniqueName: \"kubernetes.io/projected/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-kube-api-access-b9kjq\") pod \"ceilometer-0\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.319019 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.319713 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5b4d45c6bd-qsdbt" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.150:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:54282->10.217.0.150:8443: read: connection reset by peer" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.340688 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-d87a-account-create-update-gd2x9" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.358494 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrwjs\" (UniqueName: \"kubernetes.io/projected/d3803000-bb1d-4d78-a52c-a754d805449b-kube-api-access-jrwjs\") pod \"nova-cell1-b72f-account-create-update-29vrc\" (UID: \"d3803000-bb1d-4d78-a52c-a754d805449b\") " pod="openstack/nova-cell1-b72f-account-create-update-29vrc" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.358581 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3803000-bb1d-4d78-a52c-a754d805449b-operator-scripts\") pod \"nova-cell1-b72f-account-create-update-29vrc\" (UID: \"d3803000-bb1d-4d78-a52c-a754d805449b\") " pod="openstack/nova-cell1-b72f-account-create-update-29vrc" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.359324 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3803000-bb1d-4d78-a52c-a754d805449b-operator-scripts\") pod \"nova-cell1-b72f-account-create-update-29vrc\" (UID: \"d3803000-bb1d-4d78-a52c-a754d805449b\") " pod="openstack/nova-cell1-b72f-account-create-update-29vrc" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.381722 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrwjs\" (UniqueName: \"kubernetes.io/projected/d3803000-bb1d-4d78-a52c-a754d805449b-kube-api-access-jrwjs\") pod \"nova-cell1-b72f-account-create-update-29vrc\" (UID: \"d3803000-bb1d-4d78-a52c-a754d805449b\") " pod="openstack/nova-cell1-b72f-account-create-update-29vrc" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.485476 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-85ntk" Feb 01 07:43:22 crc kubenswrapper[4650]: E0201 07:43:22.506388 4650 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e572f25_ea86_45a7_b828_214b813f9d0c.slice/crio-conmon-a451bbea895b092fac95434c169fa8820f75c4dcfbd374cb478929932b3b5264.scope\": RecentStats: unable to find data in memory cache]" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.515038 4650 generic.go:334] "Generic (PLEG): container finished" podID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerID="a451bbea895b092fac95434c169fa8820f75c4dcfbd374cb478929932b3b5264" exitCode=0 Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.515103 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b4d45c6bd-qsdbt" event={"ID":"7e572f25-ea86-45a7-b828-214b813f9d0c","Type":"ContainerDied","Data":"a451bbea895b092fac95434c169fa8820f75c4dcfbd374cb478929932b3b5264"} Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.515135 4650 scope.go:117] "RemoveContainer" containerID="038fc80dfb9fd47b73607b6e75c77545e7d8c10ea25cbba2f578bdb2c48b96af" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.516282 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.585808 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-b72f-account-create-update-29vrc" Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.713665 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-9wwvn"] Feb 01 07:43:22 crc kubenswrapper[4650]: I0201 07:43:22.736784 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-86k28"] Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.005374 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f8ab-account-create-update-htkmw"] Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.130294 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-d87a-account-create-update-gd2x9"] Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.181472 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.224358 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.365312 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-85ntk"] Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.390610 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.532945 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.552488 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-86k28" event={"ID":"a6d6109a-7337-4d9b-bb82-b0f778d843c7","Type":"ContainerStarted","Data":"a37ed01bc81d8b11b267f4bc117b887d794544f411aee0319ba0e54edf32931f"} Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.552810 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-86k28" event={"ID":"a6d6109a-7337-4d9b-bb82-b0f778d843c7","Type":"ContainerStarted","Data":"cf77abe7f1cc3b498901e94f375638e043de88fc94c8d2562e6d79a2f24a4119"} Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.553422 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="5ae063fc-da05-4f12-96aa-ea13d37dc9d0" containerName="glance-log" containerID="cri-o://1c935daf8b1d75b92fd142680e79786dd3107c04ab2cc97709cc266abfbf30ee" gracePeriod=30 Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.553857 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="5ae063fc-da05-4f12-96aa-ea13d37dc9d0" containerName="glance-httpd" containerID="cri-o://a4fa062d160c18a5a3f6edd669ffb4eb1188180e2527d0ca9eb14320da88a755" gracePeriod=30 Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.557441 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b67d9d8-935c-4efe-85aa-46fa51e8da9a","Type":"ContainerStarted","Data":"8d113045ff143f97272a9a12aa3f2dceccc7af6f77034e3351388c0492ed635b"} Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.564303 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" event={"ID":"676d06ca-f3de-4ce7-b782-0588cc433361","Type":"ContainerStarted","Data":"90f1cea73580259fcd1232e7b929db7e175c14e22df22695067e6c96d30ca0b8"} Feb 01 07:43:23 crc 
kubenswrapper[4650]: I0201 07:43:23.568201 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-85ntk" event={"ID":"3f2aa3a7-ab48-4686-b15a-4333b52302a2","Type":"ContainerStarted","Data":"7a142eb2fd550552e25b51953e77ad15ea2ce50665f5ebad9fab7a8d0323ae71"} Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.585320 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9wwvn" event={"ID":"6c7dfd53-9a47-402b-951c-e785181e81a2","Type":"ContainerStarted","Data":"5eb41c68237ec3cc6da92c18ba0065e2df3459fcd12210f2fea28d0e6566d418"} Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.585377 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9wwvn" event={"ID":"6c7dfd53-9a47-402b-951c-e785181e81a2","Type":"ContainerStarted","Data":"5f767f8305201d33569f4a4515f1b4454fef3424e01dc068c38fedee6e164907"} Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.598789 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-d87a-account-create-update-gd2x9" event={"ID":"4b50b989-57dd-4a03-99ad-c46a180a3136","Type":"ContainerStarted","Data":"14be78d5a7809159d23596812690b7bffb3dcbfb1a8593fe718b9a10a34dd78c"} Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.621749 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-9wwvn" podStartSLOduration=2.6217306750000002 podStartE2EDuration="2.621730675s" podCreationTimestamp="2026-02-01 07:43:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:43:23.605457336 +0000 UTC m=+1202.328555581" watchObservedRunningTime="2026-02-01 07:43:23.621730675 +0000 UTC m=+1202.344828910" Feb 01 07:43:23 crc kubenswrapper[4650]: I0201 07:43:23.689643 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-b72f-account-create-update-29vrc"] Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.016912 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5b4d45c6bd-qsdbt" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.150:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.150:8443: connect: connection refused" Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.607277 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" event={"ID":"676d06ca-f3de-4ce7-b782-0588cc433361","Type":"ContainerStarted","Data":"91004af82472041ae0ff9ed06b49bb5ea68b64403a72337f6408e7f5f8701466"} Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.608360 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-85ntk" event={"ID":"3f2aa3a7-ab48-4686-b15a-4333b52302a2","Type":"ContainerStarted","Data":"875bb1d6c20e03a90968857d8f702027a85f51f6510abbd1505074ba3f2d143a"} Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.610312 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-d87a-account-create-update-gd2x9" event={"ID":"4b50b989-57dd-4a03-99ad-c46a180a3136","Type":"ContainerStarted","Data":"556271492f85f4aa5fdb1000f88331bfece93f7f4407518eda99552ba9aee135"} Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.614018 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b72f-account-create-update-29vrc" 
event={"ID":"d3803000-bb1d-4d78-a52c-a754d805449b","Type":"ContainerStarted","Data":"798dc26cfbbfc1eba8faa596d37c9989863ab7ff3efb0c903263608f507ab956"} Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.617602 4650 generic.go:334] "Generic (PLEG): container finished" podID="5ae063fc-da05-4f12-96aa-ea13d37dc9d0" containerID="1c935daf8b1d75b92fd142680e79786dd3107c04ab2cc97709cc266abfbf30ee" exitCode=143 Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.618249 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5ae063fc-da05-4f12-96aa-ea13d37dc9d0","Type":"ContainerDied","Data":"1c935daf8b1d75b92fd142680e79786dd3107c04ab2cc97709cc266abfbf30ee"} Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.623083 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" podStartSLOduration=3.6230663339999998 podStartE2EDuration="3.623066334s" podCreationTimestamp="2026-02-01 07:43:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:43:24.621727629 +0000 UTC m=+1203.344825874" watchObservedRunningTime="2026-02-01 07:43:24.623066334 +0000 UTC m=+1203.346164579" Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.641822 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-85ntk" podStartSLOduration=3.641803898 podStartE2EDuration="3.641803898s" podCreationTimestamp="2026-02-01 07:43:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:43:24.638152352 +0000 UTC m=+1203.361250597" watchObservedRunningTime="2026-02-01 07:43:24.641803898 +0000 UTC m=+1203.364902143" Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.657588 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-86k28" podStartSLOduration=3.657571564 podStartE2EDuration="3.657571564s" podCreationTimestamp="2026-02-01 07:43:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:43:24.656792593 +0000 UTC m=+1203.379890838" watchObservedRunningTime="2026-02-01 07:43:24.657571564 +0000 UTC m=+1203.380669809" Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.675707 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-d87a-account-create-update-gd2x9" podStartSLOduration=3.675689481 podStartE2EDuration="3.675689481s" podCreationTimestamp="2026-02-01 07:43:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:43:24.670513395 +0000 UTC m=+1203.393611640" watchObservedRunningTime="2026-02-01 07:43:24.675689481 +0000 UTC m=+1203.398787716" Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.842057 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.843961 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" 
containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.844089 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.845079 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"4036202ac516e18523b1a52ae487b8712f118510898928b033522621b7f15ee2"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.845107 4650 scope.go:117] "RemoveContainer" containerID="8fe9f906bb9af0f6e1c2ba42ba5f4922543588a5973cdd095151c3b67117ce78" Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.845141 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://4036202ac516e18523b1a52ae487b8712f118510898928b033522621b7f15ee2" gracePeriod=30 Feb 01 07:43:24 crc kubenswrapper[4650]: I0201 07:43:24.855113 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.173:8080/healthcheck\": EOF" Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.684563 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="092c036764bc43b6c02ff54e9eb3b67f429c327f120c40c9f66cf098fe79dc37" exitCode=1 Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.684628 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"092c036764bc43b6c02ff54e9eb3b67f429c327f120c40c9f66cf098fe79dc37"} Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.685717 4650 scope.go:117] "RemoveContainer" containerID="b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492" Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.685767 4650 scope.go:117] "RemoveContainer" containerID="dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31" Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.685834 4650 scope.go:117] "RemoveContainer" containerID="092c036764bc43b6c02ff54e9eb3b67f429c327f120c40c9f66cf098fe79dc37" Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.685853 4650 scope.go:117] "RemoveContainer" containerID="0762ec2515f934e543828087282d638c3ace8afd252c27ec2209aca61ed63e83" Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.689041 4650 generic.go:334] "Generic (PLEG): container finished" podID="3f2aa3a7-ab48-4686-b15a-4333b52302a2" containerID="875bb1d6c20e03a90968857d8f702027a85f51f6510abbd1505074ba3f2d143a" exitCode=0 Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.689118 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-85ntk" event={"ID":"3f2aa3a7-ab48-4686-b15a-4333b52302a2","Type":"ContainerDied","Data":"875bb1d6c20e03a90968857d8f702027a85f51f6510abbd1505074ba3f2d143a"} Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.701236 4650 generic.go:334] "Generic (PLEG): container finished" podID="6c7dfd53-9a47-402b-951c-e785181e81a2" 
containerID="5eb41c68237ec3cc6da92c18ba0065e2df3459fcd12210f2fea28d0e6566d418" exitCode=0 Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.701306 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9wwvn" event={"ID":"6c7dfd53-9a47-402b-951c-e785181e81a2","Type":"ContainerDied","Data":"5eb41c68237ec3cc6da92c18ba0065e2df3459fcd12210f2fea28d0e6566d418"} Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.702845 4650 generic.go:334] "Generic (PLEG): container finished" podID="4b50b989-57dd-4a03-99ad-c46a180a3136" containerID="556271492f85f4aa5fdb1000f88331bfece93f7f4407518eda99552ba9aee135" exitCode=0 Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.702893 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-d87a-account-create-update-gd2x9" event={"ID":"4b50b989-57dd-4a03-99ad-c46a180a3136","Type":"ContainerDied","Data":"556271492f85f4aa5fdb1000f88331bfece93f7f4407518eda99552ba9aee135"} Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.704371 4650 generic.go:334] "Generic (PLEG): container finished" podID="a6d6109a-7337-4d9b-bb82-b0f778d843c7" containerID="a37ed01bc81d8b11b267f4bc117b887d794544f411aee0319ba0e54edf32931f" exitCode=0 Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.704424 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-86k28" event={"ID":"a6d6109a-7337-4d9b-bb82-b0f778d843c7","Type":"ContainerDied","Data":"a37ed01bc81d8b11b267f4bc117b887d794544f411aee0319ba0e54edf32931f"} Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.707053 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="4036202ac516e18523b1a52ae487b8712f118510898928b033522621b7f15ee2" exitCode=0 Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.707153 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"4036202ac516e18523b1a52ae487b8712f118510898928b033522621b7f15ee2"} Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.707211 4650 scope.go:117] "RemoveContainer" containerID="d98a001c0aed64d0aa6f36e0d05edfcf4acd588e3aabc82b8a4ef3faf3a106a7" Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.710898 4650 generic.go:334] "Generic (PLEG): container finished" podID="d3803000-bb1d-4d78-a52c-a754d805449b" containerID="13d013ec21c57ca9189304a2c395d16127f381d035294f356846444028efff15" exitCode=0 Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.710952 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b72f-account-create-update-29vrc" event={"ID":"d3803000-bb1d-4d78-a52c-a754d805449b","Type":"ContainerDied","Data":"13d013ec21c57ca9189304a2c395d16127f381d035294f356846444028efff15"} Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.715082 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b67d9d8-935c-4efe-85aa-46fa51e8da9a","Type":"ContainerStarted","Data":"92b7cc402823e4e6dd302e13983a9350c54a14fa5e53e5f8856a858daaebecb6"} Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 07:43:25.717054 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" event={"ID":"676d06ca-f3de-4ce7-b782-0588cc433361","Type":"ContainerDied","Data":"91004af82472041ae0ff9ed06b49bb5ea68b64403a72337f6408e7f5f8701466"} Feb 01 07:43:25 crc kubenswrapper[4650]: I0201 
07:43:25.716732 4650 generic.go:334] "Generic (PLEG): container finished" podID="676d06ca-f3de-4ce7-b782-0588cc433361" containerID="91004af82472041ae0ff9ed06b49bb5ea68b64403a72337f6408e7f5f8701466" exitCode=0 Feb 01 07:43:25 crc kubenswrapper[4650]: E0201 07:43:25.820059 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:43:25 crc kubenswrapper[4650]: E0201 07:43:25.899568 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:43:26 crc kubenswrapper[4650]: I0201 07:43:26.728676 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"e2b400407f3c9b656bd37f6ac6b6e597a65c3873e1eddea4b784083639dafeec"} Feb 01 07:43:26 crc kubenswrapper[4650]: I0201 07:43:26.730014 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:43:26 crc kubenswrapper[4650]: I0201 07:43:26.729269 4650 scope.go:117] "RemoveContainer" containerID="8fe9f906bb9af0f6e1c2ba42ba5f4922543588a5973cdd095151c3b67117ce78" Feb 01 07:43:26 crc kubenswrapper[4650]: E0201 07:43:26.731156 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:43:26 crc kubenswrapper[4650]: I0201 07:43:26.739353 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"e6dc364e85738df90c32cbd434759e3f0e7d1ab1e42c31023453e8704d13f08b"} Feb 01 07:43:26 crc kubenswrapper[4650]: I0201 07:43:26.740197 4650 scope.go:117] "RemoveContainer" containerID="b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492" Feb 01 07:43:26 crc kubenswrapper[4650]: I0201 07:43:26.740271 4650 scope.go:117] "RemoveContainer" containerID="dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31" Feb 01 07:43:26 crc kubenswrapper[4650]: I0201 07:43:26.740414 4650 scope.go:117] "RemoveContainer" containerID="0762ec2515f934e543828087282d638c3ace8afd252c27ec2209aca61ed63e83" Feb 01 07:43:26 crc kubenswrapper[4650]: E0201 07:43:26.740767 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to 
\"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.205660 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-86k28" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.290278 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6d6109a-7337-4d9b-bb82-b0f778d843c7-operator-scripts\") pod \"a6d6109a-7337-4d9b-bb82-b0f778d843c7\" (UID: \"a6d6109a-7337-4d9b-bb82-b0f778d843c7\") " Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.290625 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7wz7\" (UniqueName: \"kubernetes.io/projected/a6d6109a-7337-4d9b-bb82-b0f778d843c7-kube-api-access-l7wz7\") pod \"a6d6109a-7337-4d9b-bb82-b0f778d843c7\" (UID: \"a6d6109a-7337-4d9b-bb82-b0f778d843c7\") " Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.291643 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6d6109a-7337-4d9b-bb82-b0f778d843c7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a6d6109a-7337-4d9b-bb82-b0f778d843c7" (UID: "a6d6109a-7337-4d9b-bb82-b0f778d843c7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.300168 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6d6109a-7337-4d9b-bb82-b0f778d843c7-kube-api-access-l7wz7" (OuterVolumeSpecName: "kube-api-access-l7wz7") pod "a6d6109a-7337-4d9b-bb82-b0f778d843c7" (UID: "a6d6109a-7337-4d9b-bb82-b0f778d843c7"). InnerVolumeSpecName "kube-api-access-l7wz7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.395614 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6d6109a-7337-4d9b-bb82-b0f778d843c7-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.395652 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7wz7\" (UniqueName: \"kubernetes.io/projected/a6d6109a-7337-4d9b-bb82-b0f778d843c7-kube-api-access-l7wz7\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.474137 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-85ntk" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.535985 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-d87a-account-create-update-gd2x9" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.571978 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b72f-account-create-update-29vrc" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.599288 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3f2aa3a7-ab48-4686-b15a-4333b52302a2-operator-scripts\") pod \"3f2aa3a7-ab48-4686-b15a-4333b52302a2\" (UID: \"3f2aa3a7-ab48-4686-b15a-4333b52302a2\") " Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.599383 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngxtc\" (UniqueName: \"kubernetes.io/projected/4b50b989-57dd-4a03-99ad-c46a180a3136-kube-api-access-ngxtc\") pod \"4b50b989-57dd-4a03-99ad-c46a180a3136\" (UID: \"4b50b989-57dd-4a03-99ad-c46a180a3136\") " Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.599464 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckhz8\" (UniqueName: \"kubernetes.io/projected/3f2aa3a7-ab48-4686-b15a-4333b52302a2-kube-api-access-ckhz8\") pod \"3f2aa3a7-ab48-4686-b15a-4333b52302a2\" (UID: \"3f2aa3a7-ab48-4686-b15a-4333b52302a2\") " Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.600395 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b50b989-57dd-4a03-99ad-c46a180a3136-operator-scripts\") pod \"4b50b989-57dd-4a03-99ad-c46a180a3136\" (UID: \"4b50b989-57dd-4a03-99ad-c46a180a3136\") " Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.602977 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b50b989-57dd-4a03-99ad-c46a180a3136-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4b50b989-57dd-4a03-99ad-c46a180a3136" (UID: "4b50b989-57dd-4a03-99ad-c46a180a3136"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.607264 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f2aa3a7-ab48-4686-b15a-4333b52302a2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3f2aa3a7-ab48-4686-b15a-4333b52302a2" (UID: "3f2aa3a7-ab48-4686-b15a-4333b52302a2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.610141 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f2aa3a7-ab48-4686-b15a-4333b52302a2-kube-api-access-ckhz8" (OuterVolumeSpecName: "kube-api-access-ckhz8") pod "3f2aa3a7-ab48-4686-b15a-4333b52302a2" (UID: "3f2aa3a7-ab48-4686-b15a-4333b52302a2"). InnerVolumeSpecName "kube-api-access-ckhz8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.616479 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b50b989-57dd-4a03-99ad-c46a180a3136-kube-api-access-ngxtc" (OuterVolumeSpecName: "kube-api-access-ngxtc") pod "4b50b989-57dd-4a03-99ad-c46a180a3136" (UID: "4b50b989-57dd-4a03-99ad-c46a180a3136"). InnerVolumeSpecName "kube-api-access-ngxtc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.637041 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-9wwvn" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.676643 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.676928 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="43fb6cca-a9d3-4205-a078-847687c48f0b" containerName="glance-log" containerID="cri-o://89293ad56417f795db16526d2d0b08092a089820e3d86259cd257e147d648f24" gracePeriod=30 Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.677111 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="43fb6cca-a9d3-4205-a078-847687c48f0b" containerName="glance-httpd" containerID="cri-o://27395eb5f1f557a4e95895903824ddd90c21115120a26935852a05dd88bbb8bd" gracePeriod=30 Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.703136 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrwjs\" (UniqueName: \"kubernetes.io/projected/d3803000-bb1d-4d78-a52c-a754d805449b-kube-api-access-jrwjs\") pod \"d3803000-bb1d-4d78-a52c-a754d805449b\" (UID: \"d3803000-bb1d-4d78-a52c-a754d805449b\") " Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.703278 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7m29d\" (UniqueName: \"kubernetes.io/projected/6c7dfd53-9a47-402b-951c-e785181e81a2-kube-api-access-7m29d\") pod \"6c7dfd53-9a47-402b-951c-e785181e81a2\" (UID: \"6c7dfd53-9a47-402b-951c-e785181e81a2\") " Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.703368 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3803000-bb1d-4d78-a52c-a754d805449b-operator-scripts\") pod \"d3803000-bb1d-4d78-a52c-a754d805449b\" (UID: \"d3803000-bb1d-4d78-a52c-a754d805449b\") " Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.703388 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c7dfd53-9a47-402b-951c-e785181e81a2-operator-scripts\") pod \"6c7dfd53-9a47-402b-951c-e785181e81a2\" (UID: \"6c7dfd53-9a47-402b-951c-e785181e81a2\") " Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.704257 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3f2aa3a7-ab48-4686-b15a-4333b52302a2-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.704272 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngxtc\" (UniqueName: \"kubernetes.io/projected/4b50b989-57dd-4a03-99ad-c46a180a3136-kube-api-access-ngxtc\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.704286 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckhz8\" (UniqueName: \"kubernetes.io/projected/3f2aa3a7-ab48-4686-b15a-4333b52302a2-kube-api-access-ckhz8\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.704295 4650 reconciler_common.go:293] "Volume detached for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b50b989-57dd-4a03-99ad-c46a180a3136-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.711557 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3803000-bb1d-4d78-a52c-a754d805449b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d3803000-bb1d-4d78-a52c-a754d805449b" (UID: "d3803000-bb1d-4d78-a52c-a754d805449b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.731433 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c7dfd53-9a47-402b-951c-e785181e81a2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6c7dfd53-9a47-402b-951c-e785181e81a2" (UID: "6c7dfd53-9a47-402b-951c-e785181e81a2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.760345 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c7dfd53-9a47-402b-951c-e785181e81a2-kube-api-access-7m29d" (OuterVolumeSpecName: "kube-api-access-7m29d") pod "6c7dfd53-9a47-402b-951c-e785181e81a2" (UID: "6c7dfd53-9a47-402b-951c-e785181e81a2"). InnerVolumeSpecName "kube-api-access-7m29d". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.779625 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9wwvn" event={"ID":"6c7dfd53-9a47-402b-951c-e785181e81a2","Type":"ContainerDied","Data":"5f767f8305201d33569f4a4515f1b4454fef3424e01dc068c38fedee6e164907"} Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.779661 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f767f8305201d33569f4a4515f1b4454fef3424e01dc068c38fedee6e164907" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.779738 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-9wwvn" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.781865 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-d87a-account-create-update-gd2x9" event={"ID":"4b50b989-57dd-4a03-99ad-c46a180a3136","Type":"ContainerDied","Data":"14be78d5a7809159d23596812690b7bffb3dcbfb1a8593fe718b9a10a34dd78c"} Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.781971 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14be78d5a7809159d23596812690b7bffb3dcbfb1a8593fe718b9a10a34dd78c" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.782181 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-d87a-account-create-update-gd2x9" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.784680 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3803000-bb1d-4d78-a52c-a754d805449b-kube-api-access-jrwjs" (OuterVolumeSpecName: "kube-api-access-jrwjs") pod "d3803000-bb1d-4d78-a52c-a754d805449b" (UID: "d3803000-bb1d-4d78-a52c-a754d805449b"). InnerVolumeSpecName "kube-api-access-jrwjs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.798037 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-86k28" event={"ID":"a6d6109a-7337-4d9b-bb82-b0f778d843c7","Type":"ContainerDied","Data":"cf77abe7f1cc3b498901e94f375638e043de88fc94c8d2562e6d79a2f24a4119"} Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.798148 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf77abe7f1cc3b498901e94f375638e043de88fc94c8d2562e6d79a2f24a4119" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.798276 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-86k28" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.808402 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrwjs\" (UniqueName: \"kubernetes.io/projected/d3803000-bb1d-4d78-a52c-a754d805449b-kube-api-access-jrwjs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.808707 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7m29d\" (UniqueName: \"kubernetes.io/projected/6c7dfd53-9a47-402b-951c-e785181e81a2-kube-api-access-7m29d\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.808717 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c7dfd53-9a47-402b-951c-e785181e81a2-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.808725 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3803000-bb1d-4d78-a52c-a754d805449b-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.816274 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b67d9d8-935c-4efe-85aa-46fa51e8da9a","Type":"ContainerStarted","Data":"90761690ed8c94400318720aae86ed4c2cf0cdd7604bfb7eba3aa657db1f20d1"} Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.821267 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5ae063fc-da05-4f12-96aa-ea13d37dc9d0","Type":"ContainerDied","Data":"a4fa062d160c18a5a3f6edd669ffb4eb1188180e2527d0ca9eb14320da88a755"} Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.820435 4650 generic.go:334] "Generic (PLEG): container finished" podID="5ae063fc-da05-4f12-96aa-ea13d37dc9d0" containerID="a4fa062d160c18a5a3f6edd669ffb4eb1188180e2527d0ca9eb14320da88a755" exitCode=0 Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.824827 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" event={"ID":"676d06ca-f3de-4ce7-b782-0588cc433361","Type":"ContainerDied","Data":"90f1cea73580259fcd1232e7b929db7e175c14e22df22695067e6c96d30ca0b8"} Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.825651 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="90f1cea73580259fcd1232e7b929db7e175c14e22df22695067e6c96d30ca0b8" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.827945 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-85ntk" 
event={"ID":"3f2aa3a7-ab48-4686-b15a-4333b52302a2","Type":"ContainerDied","Data":"7a142eb2fd550552e25b51953e77ad15ea2ce50665f5ebad9fab7a8d0323ae71"} Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.827985 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a142eb2fd550552e25b51953e77ad15ea2ce50665f5ebad9fab7a8d0323ae71" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.828071 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-85ntk" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.849728 4650 scope.go:117] "RemoveContainer" containerID="8fe9f906bb9af0f6e1c2ba42ba5f4922543588a5973cdd095151c3b67117ce78" Feb 01 07:43:27 crc kubenswrapper[4650]: E0201 07:43:27.850044 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.850177 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b72f-account-create-update-29vrc" event={"ID":"d3803000-bb1d-4d78-a52c-a754d805449b","Type":"ContainerDied","Data":"798dc26cfbbfc1eba8faa596d37c9989863ab7ff3efb0c903263608f507ab956"} Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.850199 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="798dc26cfbbfc1eba8faa596d37c9989863ab7ff3efb0c903263608f507ab956" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.850331 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b72f-account-create-update-29vrc" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.886868 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" Feb 01 07:43:27 crc kubenswrapper[4650]: I0201 07:43:27.933724 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.012020 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-public-tls-certs\") pod \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.012218 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-httpd-run\") pod \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.012304 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-config-data\") pod \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.012392 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.012521 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdb9p\" (UniqueName: \"kubernetes.io/projected/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-kube-api-access-cdb9p\") pod \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.012631 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zklp\" (UniqueName: \"kubernetes.io/projected/676d06ca-f3de-4ce7-b782-0588cc433361-kube-api-access-7zklp\") pod \"676d06ca-f3de-4ce7-b782-0588cc433361\" (UID: \"676d06ca-f3de-4ce7-b782-0588cc433361\") " Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.012804 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-logs\") pod \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.012870 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5ae063fc-da05-4f12-96aa-ea13d37dc9d0" (UID: "5ae063fc-da05-4f12-96aa-ea13d37dc9d0"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.012971 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-scripts\") pod \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.013073 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-combined-ca-bundle\") pod \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\" (UID: \"5ae063fc-da05-4f12-96aa-ea13d37dc9d0\") " Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.013552 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/676d06ca-f3de-4ce7-b782-0588cc433361-operator-scripts\") pod \"676d06ca-f3de-4ce7-b782-0588cc433361\" (UID: \"676d06ca-f3de-4ce7-b782-0588cc433361\") " Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.014474 4650 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.017860 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-logs" (OuterVolumeSpecName: "logs") pod "5ae063fc-da05-4f12-96aa-ea13d37dc9d0" (UID: "5ae063fc-da05-4f12-96aa-ea13d37dc9d0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.017966 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/676d06ca-f3de-4ce7-b782-0588cc433361-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "676d06ca-f3de-4ce7-b782-0588cc433361" (UID: "676d06ca-f3de-4ce7-b782-0588cc433361"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.018220 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/676d06ca-f3de-4ce7-b782-0588cc433361-kube-api-access-7zklp" (OuterVolumeSpecName: "kube-api-access-7zklp") pod "676d06ca-f3de-4ce7-b782-0588cc433361" (UID: "676d06ca-f3de-4ce7-b782-0588cc433361"). InnerVolumeSpecName "kube-api-access-7zklp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.018812 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-kube-api-access-cdb9p" (OuterVolumeSpecName: "kube-api-access-cdb9p") pod "5ae063fc-da05-4f12-96aa-ea13d37dc9d0" (UID: "5ae063fc-da05-4f12-96aa-ea13d37dc9d0"). InnerVolumeSpecName "kube-api-access-cdb9p". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.019569 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "5ae063fc-da05-4f12-96aa-ea13d37dc9d0" (UID: "5ae063fc-da05-4f12-96aa-ea13d37dc9d0"). InnerVolumeSpecName "local-storage10-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.021860 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-scripts" (OuterVolumeSpecName: "scripts") pod "5ae063fc-da05-4f12-96aa-ea13d37dc9d0" (UID: "5ae063fc-da05-4f12-96aa-ea13d37dc9d0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.052684 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ae063fc-da05-4f12-96aa-ea13d37dc9d0" (UID: "5ae063fc-da05-4f12-96aa-ea13d37dc9d0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.098365 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "5ae063fc-da05-4f12-96aa-ea13d37dc9d0" (UID: "5ae063fc-da05-4f12-96aa-ea13d37dc9d0"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.102794 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-config-data" (OuterVolumeSpecName: "config-data") pod "5ae063fc-da05-4f12-96aa-ea13d37dc9d0" (UID: "5ae063fc-da05-4f12-96aa-ea13d37dc9d0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.116376 4650 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.116406 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.116429 4650 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.116440 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdb9p\" (UniqueName: \"kubernetes.io/projected/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-kube-api-access-cdb9p\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.116452 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zklp\" (UniqueName: \"kubernetes.io/projected/676d06ca-f3de-4ce7-b782-0588cc433361-kube-api-access-7zklp\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.116460 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.116468 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.116477 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae063fc-da05-4f12-96aa-ea13d37dc9d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.116485 4650 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/676d06ca-f3de-4ce7-b782-0588cc433361-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.136480 4650 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.217550 4650 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.857568 4650 generic.go:334] "Generic (PLEG): container finished" podID="43fb6cca-a9d3-4205-a078-847687c48f0b" containerID="89293ad56417f795db16526d2d0b08092a089820e3d86259cd257e147d648f24" exitCode=143 Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.857661 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"43fb6cca-a9d3-4205-a078-847687c48f0b","Type":"ContainerDied","Data":"89293ad56417f795db16526d2d0b08092a089820e3d86259cd257e147d648f24"} Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.861396 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b67d9d8-935c-4efe-85aa-46fa51e8da9a","Type":"ContainerStarted","Data":"38b032002437edef1c43901282f8b456ec98ce2cd0dc7686dce5f30475839400"} Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.863267 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f8ab-account-create-update-htkmw" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.863569 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.864302 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5ae063fc-da05-4f12-96aa-ea13d37dc9d0","Type":"ContainerDied","Data":"186b99d8c97882526f9d64a391f47b3f338f5986b25474a9b476e640de434b3d"} Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.864390 4650 scope.go:117] "RemoveContainer" containerID="a4fa062d160c18a5a3f6edd669ffb4eb1188180e2527d0ca9eb14320da88a755" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.913778 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.913917 4650 scope.go:117] "RemoveContainer" containerID="1c935daf8b1d75b92fd142680e79786dd3107c04ab2cc97709cc266abfbf30ee" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.922453 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.962180 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:43:28 crc kubenswrapper[4650]: E0201 07:43:28.962747 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f2aa3a7-ab48-4686-b15a-4333b52302a2" containerName="mariadb-database-create" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.962769 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f2aa3a7-ab48-4686-b15a-4333b52302a2" containerName="mariadb-database-create" Feb 01 07:43:28 crc kubenswrapper[4650]: E0201 07:43:28.962784 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6d6109a-7337-4d9b-bb82-b0f778d843c7" containerName="mariadb-database-create" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.962793 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6d6109a-7337-4d9b-bb82-b0f778d843c7" containerName="mariadb-database-create" Feb 01 07:43:28 crc kubenswrapper[4650]: E0201 07:43:28.962807 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="676d06ca-f3de-4ce7-b782-0588cc433361" containerName="mariadb-account-create-update" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.962829 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="676d06ca-f3de-4ce7-b782-0588cc433361" containerName="mariadb-account-create-update" Feb 01 07:43:28 crc kubenswrapper[4650]: E0201 07:43:28.962850 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3803000-bb1d-4d78-a52c-a754d805449b" containerName="mariadb-account-create-update" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.962858 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3803000-bb1d-4d78-a52c-a754d805449b" containerName="mariadb-account-create-update" Feb 01 07:43:28 crc kubenswrapper[4650]: E0201 07:43:28.962877 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ae063fc-da05-4f12-96aa-ea13d37dc9d0" containerName="glance-httpd" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.962887 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ae063fc-da05-4f12-96aa-ea13d37dc9d0" containerName="glance-httpd" Feb 01 07:43:28 crc kubenswrapper[4650]: E0201 07:43:28.962917 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ae063fc-da05-4f12-96aa-ea13d37dc9d0" containerName="glance-log" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 
07:43:28.962926 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ae063fc-da05-4f12-96aa-ea13d37dc9d0" containerName="glance-log" Feb 01 07:43:28 crc kubenswrapper[4650]: E0201 07:43:28.962944 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b50b989-57dd-4a03-99ad-c46a180a3136" containerName="mariadb-account-create-update" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.962953 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b50b989-57dd-4a03-99ad-c46a180a3136" containerName="mariadb-account-create-update" Feb 01 07:43:28 crc kubenswrapper[4650]: E0201 07:43:28.962969 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c7dfd53-9a47-402b-951c-e785181e81a2" containerName="mariadb-database-create" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.962977 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c7dfd53-9a47-402b-951c-e785181e81a2" containerName="mariadb-database-create" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.963234 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3803000-bb1d-4d78-a52c-a754d805449b" containerName="mariadb-account-create-update" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.963257 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ae063fc-da05-4f12-96aa-ea13d37dc9d0" containerName="glance-httpd" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.963271 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c7dfd53-9a47-402b-951c-e785181e81a2" containerName="mariadb-database-create" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.963288 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b50b989-57dd-4a03-99ad-c46a180a3136" containerName="mariadb-account-create-update" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.963306 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f2aa3a7-ab48-4686-b15a-4333b52302a2" containerName="mariadb-database-create" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.963322 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="676d06ca-f3de-4ce7-b782-0588cc433361" containerName="mariadb-account-create-update" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.963334 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ae063fc-da05-4f12-96aa-ea13d37dc9d0" containerName="glance-log" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.963345 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6d6109a-7337-4d9b-bb82-b0f778d843c7" containerName="mariadb-database-create" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.964621 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.968459 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Feb 01 07:43:28 crc kubenswrapper[4650]: I0201 07:43:28.982828 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:28.999934 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.033372 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/34f640f2-11f5-429e-a5e2-41cffad03e78-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.033471 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/34f640f2-11f5-429e-a5e2-41cffad03e78-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.033501 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.033520 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34f640f2-11f5-429e-a5e2-41cffad03e78-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.033541 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34f640f2-11f5-429e-a5e2-41cffad03e78-logs\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.033592 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34f640f2-11f5-429e-a5e2-41cffad03e78-config-data\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.033675 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34f640f2-11f5-429e-a5e2-41cffad03e78-scripts\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.033691 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-kqqtl\" (UniqueName: \"kubernetes.io/projected/34f640f2-11f5-429e-a5e2-41cffad03e78-kube-api-access-kqqtl\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.134778 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34f640f2-11f5-429e-a5e2-41cffad03e78-logs\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.134851 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34f640f2-11f5-429e-a5e2-41cffad03e78-config-data\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.134917 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34f640f2-11f5-429e-a5e2-41cffad03e78-scripts\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.134936 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqqtl\" (UniqueName: \"kubernetes.io/projected/34f640f2-11f5-429e-a5e2-41cffad03e78-kube-api-access-kqqtl\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.134975 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/34f640f2-11f5-429e-a5e2-41cffad03e78-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.135019 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/34f640f2-11f5-429e-a5e2-41cffad03e78-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.135058 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.135073 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34f640f2-11f5-429e-a5e2-41cffad03e78-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.136087 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/34f640f2-11f5-429e-a5e2-41cffad03e78-logs\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.136634 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/34f640f2-11f5-429e-a5e2-41cffad03e78-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.140108 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.142378 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/34f640f2-11f5-429e-a5e2-41cffad03e78-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.145787 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34f640f2-11f5-429e-a5e2-41cffad03e78-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.146762 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34f640f2-11f5-429e-a5e2-41cffad03e78-config-data\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.147243 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34f640f2-11f5-429e-a5e2-41cffad03e78-scripts\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.155587 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqqtl\" (UniqueName: \"kubernetes.io/projected/34f640f2-11f5-429e-a5e2-41cffad03e78-kube-api-access-kqqtl\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.174650 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"34f640f2-11f5-429e-a5e2-41cffad03e78\") " pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.300444 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.925901 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 01 07:43:29 crc kubenswrapper[4650]: W0201 07:43:29.932705 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34f640f2_11f5_429e_a5e2_41cffad03e78.slice/crio-4cf6a15c63ebd28834932207f62b799c25e1d5a9f70e570367327a065318d501 WatchSource:0}: Error finding container 4cf6a15c63ebd28834932207f62b799c25e1d5a9f70e570367327a065318d501: Status 404 returned error can't find the container with id 4cf6a15c63ebd28834932207f62b799c25e1d5a9f70e570367327a065318d501 Feb 01 07:43:29 crc kubenswrapper[4650]: I0201 07:43:29.979721 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ae063fc-da05-4f12-96aa-ea13d37dc9d0" path="/var/lib/kubelet/pods/5ae063fc-da05-4f12-96aa-ea13d37dc9d0/volumes" Feb 01 07:43:30 crc kubenswrapper[4650]: I0201 07:43:30.806001 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:30 crc kubenswrapper[4650]: I0201 07:43:30.892681 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b67d9d8-935c-4efe-85aa-46fa51e8da9a","Type":"ContainerStarted","Data":"1399d2c2a60d838895067891dd86a2e9d756ff1e260edf339d2a622b7ee50da7"} Feb 01 07:43:30 crc kubenswrapper[4650]: I0201 07:43:30.892815 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 01 07:43:30 crc kubenswrapper[4650]: I0201 07:43:30.905207 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="ceilometer-central-agent" containerID="cri-o://92b7cc402823e4e6dd302e13983a9350c54a14fa5e53e5f8856a858daaebecb6" gracePeriod=30 Feb 01 07:43:30 crc kubenswrapper[4650]: I0201 07:43:30.905531 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="proxy-httpd" containerID="cri-o://1399d2c2a60d838895067891dd86a2e9d756ff1e260edf339d2a622b7ee50da7" gracePeriod=30 Feb 01 07:43:30 crc kubenswrapper[4650]: I0201 07:43:30.905571 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="sg-core" containerID="cri-o://38b032002437edef1c43901282f8b456ec98ce2cd0dc7686dce5f30475839400" gracePeriod=30 Feb 01 07:43:30 crc kubenswrapper[4650]: I0201 07:43:30.905619 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="ceilometer-notification-agent" containerID="cri-o://90761690ed8c94400318720aae86ed4c2cf0cdd7604bfb7eba3aa657db1f20d1" gracePeriod=30 Feb 01 07:43:30 crc kubenswrapper[4650]: I0201 07:43:30.911189 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"34f640f2-11f5-429e-a5e2-41cffad03e78","Type":"ContainerStarted","Data":"605c8584e3809b777656abf665ccc08577d50b09f43e1bf7b4e9e88b58920790"} Feb 01 07:43:30 crc kubenswrapper[4650]: 
I0201 07:43:30.911224 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"34f640f2-11f5-429e-a5e2-41cffad03e78","Type":"ContainerStarted","Data":"4cf6a15c63ebd28834932207f62b799c25e1d5a9f70e570367327a065318d501"} Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.591330 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.611091 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.222209316 podStartE2EDuration="10.611072901s" podCreationTimestamp="2026-02-01 07:43:21 +0000 UTC" firstStartedPulling="2026-02-01 07:43:23.488450283 +0000 UTC m=+1202.211548528" lastFinishedPulling="2026-02-01 07:43:29.877313868 +0000 UTC m=+1208.600412113" observedRunningTime="2026-02-01 07:43:30.926145799 +0000 UTC m=+1209.649244054" watchObservedRunningTime="2026-02-01 07:43:31.611072901 +0000 UTC m=+1210.334171146" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.686914 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-config-data\") pod \"43fb6cca-a9d3-4205-a078-847687c48f0b\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.686971 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-combined-ca-bundle\") pod \"43fb6cca-a9d3-4205-a078-847687c48f0b\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.687070 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-scripts\") pod \"43fb6cca-a9d3-4205-a078-847687c48f0b\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.687130 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-internal-tls-certs\") pod \"43fb6cca-a9d3-4205-a078-847687c48f0b\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.687151 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/43fb6cca-a9d3-4205-a078-847687c48f0b-httpd-run\") pod \"43fb6cca-a9d3-4205-a078-847687c48f0b\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.687209 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"43fb6cca-a9d3-4205-a078-847687c48f0b\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.687254 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kmtm\" (UniqueName: \"kubernetes.io/projected/43fb6cca-a9d3-4205-a078-847687c48f0b-kube-api-access-4kmtm\") pod \"43fb6cca-a9d3-4205-a078-847687c48f0b\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 
07:43:31.687328 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43fb6cca-a9d3-4205-a078-847687c48f0b-logs\") pod \"43fb6cca-a9d3-4205-a078-847687c48f0b\" (UID: \"43fb6cca-a9d3-4205-a078-847687c48f0b\") " Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.687998 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43fb6cca-a9d3-4205-a078-847687c48f0b-logs" (OuterVolumeSpecName: "logs") pod "43fb6cca-a9d3-4205-a078-847687c48f0b" (UID: "43fb6cca-a9d3-4205-a078-847687c48f0b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.688486 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43fb6cca-a9d3-4205-a078-847687c48f0b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "43fb6cca-a9d3-4205-a078-847687c48f0b" (UID: "43fb6cca-a9d3-4205-a078-847687c48f0b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.694423 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "43fb6cca-a9d3-4205-a078-847687c48f0b" (UID: "43fb6cca-a9d3-4205-a078-847687c48f0b"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.701831 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-scripts" (OuterVolumeSpecName: "scripts") pod "43fb6cca-a9d3-4205-a078-847687c48f0b" (UID: "43fb6cca-a9d3-4205-a078-847687c48f0b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.706911 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43fb6cca-a9d3-4205-a078-847687c48f0b-kube-api-access-4kmtm" (OuterVolumeSpecName: "kube-api-access-4kmtm") pod "43fb6cca-a9d3-4205-a078-847687c48f0b" (UID: "43fb6cca-a9d3-4205-a078-847687c48f0b"). InnerVolumeSpecName "kube-api-access-4kmtm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.745819 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "43fb6cca-a9d3-4205-a078-847687c48f0b" (UID: "43fb6cca-a9d3-4205-a078-847687c48f0b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.747236 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-config-data" (OuterVolumeSpecName: "config-data") pod "43fb6cca-a9d3-4205-a078-847687c48f0b" (UID: "43fb6cca-a9d3-4205-a078-847687c48f0b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.765876 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "43fb6cca-a9d3-4205-a078-847687c48f0b" (UID: "43fb6cca-a9d3-4205-a078-847687c48f0b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.789243 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.789284 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.789297 4650 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.789312 4650 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/43fb6cca-a9d3-4205-a078-847687c48f0b-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.789340 4650 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.789350 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kmtm\" (UniqueName: \"kubernetes.io/projected/43fb6cca-a9d3-4205-a078-847687c48f0b-kube-api-access-4kmtm\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.789360 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43fb6cca-a9d3-4205-a078-847687c48f0b-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.789367 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fb6cca-a9d3-4205-a078-847687c48f0b-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.808744 4650 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.891158 4650 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.921729 4650 generic.go:334] "Generic (PLEG): container finished" podID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerID="1399d2c2a60d838895067891dd86a2e9d756ff1e260edf339d2a622b7ee50da7" exitCode=0 Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.921951 4650 generic.go:334] "Generic (PLEG): container finished" podID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" 
containerID="38b032002437edef1c43901282f8b456ec98ce2cd0dc7686dce5f30475839400" exitCode=2 Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.922041 4650 generic.go:334] "Generic (PLEG): container finished" podID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerID="90761690ed8c94400318720aae86ed4c2cf0cdd7604bfb7eba3aa657db1f20d1" exitCode=0 Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.921928 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b67d9d8-935c-4efe-85aa-46fa51e8da9a","Type":"ContainerDied","Data":"1399d2c2a60d838895067891dd86a2e9d756ff1e260edf339d2a622b7ee50da7"} Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.922598 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b67d9d8-935c-4efe-85aa-46fa51e8da9a","Type":"ContainerDied","Data":"38b032002437edef1c43901282f8b456ec98ce2cd0dc7686dce5f30475839400"} Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.922788 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b67d9d8-935c-4efe-85aa-46fa51e8da9a","Type":"ContainerDied","Data":"90761690ed8c94400318720aae86ed4c2cf0cdd7604bfb7eba3aa657db1f20d1"} Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.924294 4650 generic.go:334] "Generic (PLEG): container finished" podID="43fb6cca-a9d3-4205-a078-847687c48f0b" containerID="27395eb5f1f557a4e95895903824ddd90c21115120a26935852a05dd88bbb8bd" exitCode=0 Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.924413 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"43fb6cca-a9d3-4205-a078-847687c48f0b","Type":"ContainerDied","Data":"27395eb5f1f557a4e95895903824ddd90c21115120a26935852a05dd88bbb8bd"} Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.924496 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"43fb6cca-a9d3-4205-a078-847687c48f0b","Type":"ContainerDied","Data":"6bc366f856ccc93b09d1ea04c34fe8a683e2d347f24c316be3e841dc887be5b4"} Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.924576 4650 scope.go:117] "RemoveContainer" containerID="27395eb5f1f557a4e95895903824ddd90c21115120a26935852a05dd88bbb8bd" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.924744 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.931474 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"34f640f2-11f5-429e-a5e2-41cffad03e78","Type":"ContainerStarted","Data":"c25691877bc564b235b0b766644cc2c3553b1ce941d234a1c43b17c93e4c3d05"} Feb 01 07:43:31 crc kubenswrapper[4650]: I0201 07:43:31.998418 4650 scope.go:117] "RemoveContainer" containerID="89293ad56417f795db16526d2d0b08092a089820e3d86259cd257e147d648f24" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.001159 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.00114172 podStartE2EDuration="4.00114172s" podCreationTimestamp="2026-02-01 07:43:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:43:31.96018936 +0000 UTC m=+1210.683287605" watchObservedRunningTime="2026-02-01 07:43:32.00114172 +0000 UTC m=+1210.724239975" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.041317 4650 scope.go:117] "RemoveContainer" containerID="27395eb5f1f557a4e95895903824ddd90c21115120a26935852a05dd88bbb8bd" Feb 01 07:43:32 crc kubenswrapper[4650]: E0201 07:43:32.049237 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27395eb5f1f557a4e95895903824ddd90c21115120a26935852a05dd88bbb8bd\": container with ID starting with 27395eb5f1f557a4e95895903824ddd90c21115120a26935852a05dd88bbb8bd not found: ID does not exist" containerID="27395eb5f1f557a4e95895903824ddd90c21115120a26935852a05dd88bbb8bd" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.049286 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27395eb5f1f557a4e95895903824ddd90c21115120a26935852a05dd88bbb8bd"} err="failed to get container status \"27395eb5f1f557a4e95895903824ddd90c21115120a26935852a05dd88bbb8bd\": rpc error: code = NotFound desc = could not find container \"27395eb5f1f557a4e95895903824ddd90c21115120a26935852a05dd88bbb8bd\": container with ID starting with 27395eb5f1f557a4e95895903824ddd90c21115120a26935852a05dd88bbb8bd not found: ID does not exist" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.049316 4650 scope.go:117] "RemoveContainer" containerID="89293ad56417f795db16526d2d0b08092a089820e3d86259cd257e147d648f24" Feb 01 07:43:32 crc kubenswrapper[4650]: E0201 07:43:32.050078 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89293ad56417f795db16526d2d0b08092a089820e3d86259cd257e147d648f24\": container with ID starting with 89293ad56417f795db16526d2d0b08092a089820e3d86259cd257e147d648f24 not found: ID does not exist" containerID="89293ad56417f795db16526d2d0b08092a089820e3d86259cd257e147d648f24" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.050104 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89293ad56417f795db16526d2d0b08092a089820e3d86259cd257e147d648f24"} err="failed to get container status \"89293ad56417f795db16526d2d0b08092a089820e3d86259cd257e147d648f24\": rpc error: code = NotFound desc = could not find container \"89293ad56417f795db16526d2d0b08092a089820e3d86259cd257e147d648f24\": container with ID starting with 
89293ad56417f795db16526d2d0b08092a089820e3d86259cd257e147d648f24 not found: ID does not exist" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.066925 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.084547 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.099119 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:43:32 crc kubenswrapper[4650]: E0201 07:43:32.099550 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43fb6cca-a9d3-4205-a078-847687c48f0b" containerName="glance-log" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.099568 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="43fb6cca-a9d3-4205-a078-847687c48f0b" containerName="glance-log" Feb 01 07:43:32 crc kubenswrapper[4650]: E0201 07:43:32.099581 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43fb6cca-a9d3-4205-a078-847687c48f0b" containerName="glance-httpd" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.099587 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="43fb6cca-a9d3-4205-a078-847687c48f0b" containerName="glance-httpd" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.099801 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="43fb6cca-a9d3-4205-a078-847687c48f0b" containerName="glance-httpd" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.099816 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="43fb6cca-a9d3-4205-a078-847687c48f0b" containerName="glance-log" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.100773 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.103837 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.104596 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.120062 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.189401 4650 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod7467859e-a792-4959-bd51-d353099352bd"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod7467859e-a792-4959-bd51-d353099352bd] : Timed out while waiting for systemd to remove kubepods-besteffort-pod7467859e_a792_4959_bd51_d353099352bd.slice" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.307782 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdnxl\" (UniqueName: \"kubernetes.io/projected/33c7507f-4100-4022-9c51-482d09197d58-kube-api-access-zdnxl\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.308428 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.308467 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33c7507f-4100-4022-9c51-482d09197d58-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.308500 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33c7507f-4100-4022-9c51-482d09197d58-scripts\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.308565 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/33c7507f-4100-4022-9c51-482d09197d58-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.308608 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33c7507f-4100-4022-9c51-482d09197d58-config-data\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.308628 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33c7507f-4100-4022-9c51-482d09197d58-logs\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.308661 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33c7507f-4100-4022-9c51-482d09197d58-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.373055 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9hm6l"] Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.374220 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.376161 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.380117 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-fw65n" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.380176 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.387436 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9hm6l"] Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.410662 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33c7507f-4100-4022-9c51-482d09197d58-logs\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.410744 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33c7507f-4100-4022-9c51-482d09197d58-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.410779 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-9hm6l\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.410802 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-config-data\") pod \"nova-cell0-conductor-db-sync-9hm6l\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.410824 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdnxl\" (UniqueName: 
\"kubernetes.io/projected/33c7507f-4100-4022-9c51-482d09197d58-kube-api-access-zdnxl\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.410859 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.410876 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8xws\" (UniqueName: \"kubernetes.io/projected/364e7c65-e9d5-4a41-b87b-62b8da17e636-kube-api-access-h8xws\") pod \"nova-cell0-conductor-db-sync-9hm6l\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.410904 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33c7507f-4100-4022-9c51-482d09197d58-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.410938 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33c7507f-4100-4022-9c51-482d09197d58-scripts\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.411006 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/33c7507f-4100-4022-9c51-482d09197d58-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.411069 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-scripts\") pod \"nova-cell0-conductor-db-sync-9hm6l\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.411087 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33c7507f-4100-4022-9c51-482d09197d58-config-data\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.414217 4650 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.416370 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/33c7507f-4100-4022-9c51-482d09197d58-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.418472 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33c7507f-4100-4022-9c51-482d09197d58-logs\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.422156 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33c7507f-4100-4022-9c51-482d09197d58-config-data\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.424811 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33c7507f-4100-4022-9c51-482d09197d58-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.425279 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33c7507f-4100-4022-9c51-482d09197d58-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.425644 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33c7507f-4100-4022-9c51-482d09197d58-scripts\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.472718 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdnxl\" (UniqueName: \"kubernetes.io/projected/33c7507f-4100-4022-9c51-482d09197d58-kube-api-access-zdnxl\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.491590 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"33c7507f-4100-4022-9c51-482d09197d58\") " pod="openstack/glance-default-internal-api-0" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.515273 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8xws\" (UniqueName: \"kubernetes.io/projected/364e7c65-e9d5-4a41-b87b-62b8da17e636-kube-api-access-h8xws\") pod \"nova-cell0-conductor-db-sync-9hm6l\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.515437 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-scripts\") pod \"nova-cell0-conductor-db-sync-9hm6l\" (UID: 
\"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.515482 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-9hm6l\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.515503 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-config-data\") pod \"nova-cell0-conductor-db-sync-9hm6l\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.528615 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-9hm6l\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.534229 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-scripts\") pod \"nova-cell0-conductor-db-sync-9hm6l\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.536458 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-config-data\") pod \"nova-cell0-conductor-db-sync-9hm6l\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.598785 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8xws\" (UniqueName: \"kubernetes.io/projected/364e7c65-e9d5-4a41-b87b-62b8da17e636-kube-api-access-h8xws\") pod \"nova-cell0-conductor-db-sync-9hm6l\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.698844 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:32 crc kubenswrapper[4650]: I0201 07:43:32.734125 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 01 07:43:33 crc kubenswrapper[4650]: I0201 07:43:33.190503 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9hm6l"] Feb 01 07:43:33 crc kubenswrapper[4650]: I0201 07:43:33.389927 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 01 07:43:33 crc kubenswrapper[4650]: W0201 07:43:33.394847 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33c7507f_4100_4022_9c51_482d09197d58.slice/crio-7906118fb4459b844be9e5e0bdd3123a44fd48f8b991b81dcb99af623d9fc5c5 WatchSource:0}: Error finding container 7906118fb4459b844be9e5e0bdd3123a44fd48f8b991b81dcb99af623d9fc5c5: Status 404 returned error can't find the container with id 7906118fb4459b844be9e5e0bdd3123a44fd48f8b991b81dcb99af623d9fc5c5 Feb 01 07:43:33 crc kubenswrapper[4650]: I0201 07:43:33.807910 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:33 crc kubenswrapper[4650]: I0201 07:43:33.985437 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43fb6cca-a9d3-4205-a078-847687c48f0b" path="/var/lib/kubelet/pods/43fb6cca-a9d3-4205-a078-847687c48f0b/volumes" Feb 01 07:43:33 crc kubenswrapper[4650]: I0201 07:43:33.986281 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-9hm6l" event={"ID":"364e7c65-e9d5-4a41-b87b-62b8da17e636","Type":"ContainerStarted","Data":"4fa78124ab6510f2f49fe3d173dab67295c3b5f60974417416d742eff4d2e128"} Feb 01 07:43:33 crc kubenswrapper[4650]: I0201 07:43:33.986309 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"33c7507f-4100-4022-9c51-482d09197d58","Type":"ContainerStarted","Data":"5244f4cae9f8d8ce27ceed21bc5912566c3e6919dfc2a235900df1bcdac406df"} Feb 01 07:43:33 crc kubenswrapper[4650]: I0201 07:43:33.986320 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"33c7507f-4100-4022-9c51-482d09197d58","Type":"ContainerStarted","Data":"7906118fb4459b844be9e5e0bdd3123a44fd48f8b991b81dcb99af623d9fc5c5"} Feb 01 07:43:34 crc kubenswrapper[4650]: I0201 07:43:34.016993 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5b4d45c6bd-qsdbt" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.150:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.150:8443: connect: connection refused" Feb 01 07:43:34 crc kubenswrapper[4650]: I0201 07:43:34.804844 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:34 crc kubenswrapper[4650]: I0201 07:43:34.991584 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"33c7507f-4100-4022-9c51-482d09197d58","Type":"ContainerStarted","Data":"bb8a83da31c86760b82f53477b90d8a84d94d292dfce9bdae7cf986d3b920695"} Feb 01 07:43:35 crc kubenswrapper[4650]: I0201 07:43:35.028467 4650 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.028451084 podStartE2EDuration="3.028451084s" podCreationTimestamp="2026-02-01 07:43:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:43:35.017207367 +0000 UTC m=+1213.740305632" watchObservedRunningTime="2026-02-01 07:43:35.028451084 +0000 UTC m=+1213.751549329" Feb 01 07:43:36 crc kubenswrapper[4650]: I0201 07:43:36.803602 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:36 crc kubenswrapper[4650]: I0201 07:43:36.804481 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:43:36 crc kubenswrapper[4650]: I0201 07:43:36.805756 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"e2b400407f3c9b656bd37f6ac6b6e597a65c3873e1eddea4b784083639dafeec"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 07:43:36 crc kubenswrapper[4650]: I0201 07:43:36.805778 4650 scope.go:117] "RemoveContainer" containerID="8fe9f906bb9af0f6e1c2ba42ba5f4922543588a5973cdd095151c3b67117ce78" Feb 01 07:43:36 crc kubenswrapper[4650]: I0201 07:43:36.805941 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://e2b400407f3c9b656bd37f6ac6b6e597a65c3873e1eddea4b784083639dafeec" gracePeriod=30 Feb 01 07:43:36 crc kubenswrapper[4650]: I0201 07:43:36.813321 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.173:8080/healthcheck\": EOF" Feb 01 07:43:37 crc kubenswrapper[4650]: I0201 07:43:37.049279 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="e2b400407f3c9b656bd37f6ac6b6e597a65c3873e1eddea4b784083639dafeec" exitCode=0 Feb 01 07:43:37 crc kubenswrapper[4650]: I0201 07:43:37.049331 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"e2b400407f3c9b656bd37f6ac6b6e597a65c3873e1eddea4b784083639dafeec"} Feb 01 07:43:37 crc kubenswrapper[4650]: I0201 07:43:37.049373 4650 scope.go:117] "RemoveContainer" containerID="4036202ac516e18523b1a52ae487b8712f118510898928b033522621b7f15ee2" Feb 01 07:43:38 crc kubenswrapper[4650]: I0201 07:43:38.077176 4650 generic.go:334] "Generic (PLEG): container finished" podID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerID="92b7cc402823e4e6dd302e13983a9350c54a14fa5e53e5f8856a858daaebecb6" exitCode=0 Feb 01 07:43:38 crc kubenswrapper[4650]: I0201 07:43:38.077250 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b67d9d8-935c-4efe-85aa-46fa51e8da9a","Type":"ContainerDied","Data":"92b7cc402823e4e6dd302e13983a9350c54a14fa5e53e5f8856a858daaebecb6"} Feb 01 
07:43:38 crc kubenswrapper[4650]: I0201 07:43:38.965608 4650 scope.go:117] "RemoveContainer" containerID="b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492" Feb 01 07:43:38 crc kubenswrapper[4650]: I0201 07:43:38.965675 4650 scope.go:117] "RemoveContainer" containerID="dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31" Feb 01 07:43:38 crc kubenswrapper[4650]: I0201 07:43:38.965761 4650 scope.go:117] "RemoveContainer" containerID="0762ec2515f934e543828087282d638c3ace8afd252c27ec2209aca61ed63e83" Feb 01 07:43:39 crc kubenswrapper[4650]: I0201 07:43:39.300710 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 01 07:43:39 crc kubenswrapper[4650]: I0201 07:43:39.300774 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 01 07:43:39 crc kubenswrapper[4650]: I0201 07:43:39.346269 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 01 07:43:39 crc kubenswrapper[4650]: I0201 07:43:39.350879 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 01 07:43:39 crc kubenswrapper[4650]: I0201 07:43:39.800590 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.173:8080/healthcheck\": dial tcp 10.217.0.173:8080: connect: connection refused" Feb 01 07:43:40 crc kubenswrapper[4650]: I0201 07:43:40.092822 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 01 07:43:40 crc kubenswrapper[4650]: I0201 07:43:40.093075 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.000059 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.107247 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-sg-core-conf-yaml\") pod \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.107314 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-config-data\") pod \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.107376 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-log-httpd\") pod \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.107405 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-combined-ca-bundle\") pod \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.107440 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-run-httpd\") pod \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.107491 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-scripts\") pod \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.107533 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9kjq\" (UniqueName: \"kubernetes.io/projected/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-kube-api-access-b9kjq\") pod \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\" (UID: \"9b67d9d8-935c-4efe-85aa-46fa51e8da9a\") " Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.109541 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9b67d9d8-935c-4efe-85aa-46fa51e8da9a" (UID: "9b67d9d8-935c-4efe-85aa-46fa51e8da9a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.110532 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9b67d9d8-935c-4efe-85aa-46fa51e8da9a" (UID: "9b67d9d8-935c-4efe-85aa-46fa51e8da9a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.115177 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.115611 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b67d9d8-935c-4efe-85aa-46fa51e8da9a","Type":"ContainerDied","Data":"8d113045ff143f97272a9a12aa3f2dceccc7af6f77034e3351388c0492ed635b"} Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.115644 4650 scope.go:117] "RemoveContainer" containerID="1399d2c2a60d838895067891dd86a2e9d756ff1e260edf339d2a622b7ee50da7" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.119375 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-scripts" (OuterVolumeSpecName: "scripts") pod "9b67d9d8-935c-4efe-85aa-46fa51e8da9a" (UID: "9b67d9d8-935c-4efe-85aa-46fa51e8da9a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.123417 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-kube-api-access-b9kjq" (OuterVolumeSpecName: "kube-api-access-b9kjq") pod "9b67d9d8-935c-4efe-85aa-46fa51e8da9a" (UID: "9b67d9d8-935c-4efe-85aa-46fa51e8da9a"). InnerVolumeSpecName "kube-api-access-b9kjq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.138439 4650 scope.go:117] "RemoveContainer" containerID="38b032002437edef1c43901282f8b456ec98ce2cd0dc7686dce5f30475839400" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.161194 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9b67d9d8-935c-4efe-85aa-46fa51e8da9a" (UID: "9b67d9d8-935c-4efe-85aa-46fa51e8da9a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.177214 4650 scope.go:117] "RemoveContainer" containerID="90761690ed8c94400318720aae86ed4c2cf0cdd7604bfb7eba3aa657db1f20d1" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.208098 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b67d9d8-935c-4efe-85aa-46fa51e8da9a" (UID: "9b67d9d8-935c-4efe-85aa-46fa51e8da9a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.209352 4650 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.209381 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.209393 4650 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.209406 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.209416 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9kjq\" (UniqueName: \"kubernetes.io/projected/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-kube-api-access-b9kjq\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.209425 4650 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.230096 4650 scope.go:117] "RemoveContainer" containerID="92b7cc402823e4e6dd302e13983a9350c54a14fa5e53e5f8856a858daaebecb6" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.238485 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-config-data" (OuterVolumeSpecName: "config-data") pod "9b67d9d8-935c-4efe-85aa-46fa51e8da9a" (UID: "9b67d9d8-935c-4efe-85aa-46fa51e8da9a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.311597 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b67d9d8-935c-4efe-85aa-46fa51e8da9a-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.476422 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.484409 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.496105 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:41 crc kubenswrapper[4650]: E0201 07:43:41.496438 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="ceilometer-central-agent" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.496453 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="ceilometer-central-agent" Feb 01 07:43:41 crc kubenswrapper[4650]: E0201 07:43:41.496482 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="sg-core" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.496488 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="sg-core" Feb 01 07:43:41 crc kubenswrapper[4650]: E0201 07:43:41.496500 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="ceilometer-notification-agent" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.496506 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="ceilometer-notification-agent" Feb 01 07:43:41 crc kubenswrapper[4650]: E0201 07:43:41.496519 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="proxy-httpd" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.496524 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="proxy-httpd" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.496874 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="ceilometer-notification-agent" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.496887 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="ceilometer-central-agent" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.496904 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="proxy-httpd" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.496912 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" containerName="sg-core" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.499366 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.505305 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.505973 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.516392 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.617138 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-scripts\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.617186 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-config-data\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.617256 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d22003a-32cd-46b8-96b7-dfff2b7a5762-log-httpd\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.617416 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d22003a-32cd-46b8-96b7-dfff2b7a5762-run-httpd\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.617487 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.617522 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.617667 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8d4pn\" (UniqueName: \"kubernetes.io/projected/0d22003a-32cd-46b8-96b7-dfff2b7a5762-kube-api-access-8d4pn\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.719538 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d22003a-32cd-46b8-96b7-dfff2b7a5762-run-httpd\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.719587 4650 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.719611 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.719643 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8d4pn\" (UniqueName: \"kubernetes.io/projected/0d22003a-32cd-46b8-96b7-dfff2b7a5762-kube-api-access-8d4pn\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.719688 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-scripts\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.719714 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-config-data\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.720085 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d22003a-32cd-46b8-96b7-dfff2b7a5762-run-httpd\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.720349 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d22003a-32cd-46b8-96b7-dfff2b7a5762-log-httpd\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.720467 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d22003a-32cd-46b8-96b7-dfff2b7a5762-log-httpd\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.727392 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-config-data\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.736575 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-scripts\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.739767 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.740759 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.743697 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8d4pn\" (UniqueName: \"kubernetes.io/projected/0d22003a-32cd-46b8-96b7-dfff2b7a5762-kube-api-access-8d4pn\") pod \"ceilometer-0\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.820346 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:41 crc kubenswrapper[4650]: I0201 07:43:41.987539 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b67d9d8-935c-4efe-85aa-46fa51e8da9a" path="/var/lib/kubelet/pods/9b67d9d8-935c-4efe-85aa-46fa51e8da9a/volumes" Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.138814 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"364999bd64808358fe001acc7a8515bd3d203a1e0fa5d33e6905e4adb9c816a4"} Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.139087 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"3253537655fa707d10b86cd13bca8974885a06833d526a0a78a516111a7bdcc2"} Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.139983 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.140882 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.159085 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" exitCode=1 Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.159113 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" exitCode=1 Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.159166 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970"} Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.159191 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a"} Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.159204 4650 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a"} Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.159220 4650 scope.go:117] "RemoveContainer" containerID="dc3b3e55c6ba7d063e9c50fe5650a1e17209c9e4b365945ed3541772ef8edd31" Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.160565 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.160638 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:43:42 crc kubenswrapper[4650]: E0201 07:43:42.160983 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.172970 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-9hm6l" event={"ID":"364e7c65-e9d5-4a41-b87b-62b8da17e636","Type":"ContainerStarted","Data":"14dad6be3549ceffb86a1653b9ed525a1992ac37c1e02f90a17d34bf5db4a5ff"} Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.210941 4650 scope.go:117] "RemoveContainer" containerID="b15dd2eed6ee477e8fdff6c86b4fffb3a709c937a2fe28219123859eadb5b492" Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.217894 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-9hm6l" podStartSLOduration=2.484336874 podStartE2EDuration="10.217873877s" podCreationTimestamp="2026-02-01 07:43:32 +0000 UTC" firstStartedPulling="2026-02-01 07:43:33.205165842 +0000 UTC m=+1211.928264087" lastFinishedPulling="2026-02-01 07:43:40.938702845 +0000 UTC m=+1219.661801090" observedRunningTime="2026-02-01 07:43:42.2122888 +0000 UTC m=+1220.935387045" watchObservedRunningTime="2026-02-01 07:43:42.217873877 +0000 UTC m=+1220.940972122" Feb 01 07:43:42 crc kubenswrapper[4650]: W0201 07:43:42.301683 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d22003a_32cd_46b8_96b7_dfff2b7a5762.slice/crio-9d2d4a8bef995a397cb64230a46fb49cd6c20056a91f082dd5be968a3b213ddd WatchSource:0}: Error finding container 9d2d4a8bef995a397cb64230a46fb49cd6c20056a91f082dd5be968a3b213ddd: Status 404 returned error can't find the container with id 9d2d4a8bef995a397cb64230a46fb49cd6c20056a91f082dd5be968a3b213ddd Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.304598 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.738459 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.738914 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/glance-default-internal-api-0" Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.784234 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 01 07:43:42 crc kubenswrapper[4650]: I0201 07:43:42.798706 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.053829 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.054244 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.219510 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" exitCode=1 Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.219564 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970"} Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.220561 4650 scope.go:117] "RemoveContainer" containerID="0762ec2515f934e543828087282d638c3ace8afd252c27ec2209aca61ed63e83" Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.221627 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.221688 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.221779 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:43:43 crc kubenswrapper[4650]: E0201 07:43:43.222155 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.242916 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d22003a-32cd-46b8-96b7-dfff2b7a5762","Type":"ContainerStarted","Data":"70c1bd073df5fe82e3d4d2531121f154ac237b568356d8e148d72261c50d916a"} Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.242961 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d22003a-32cd-46b8-96b7-dfff2b7a5762","Type":"ContainerStarted","Data":"9d2d4a8bef995a397cb64230a46fb49cd6c20056a91f082dd5be968a3b213ddd"} Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.253069 
4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="364999bd64808358fe001acc7a8515bd3d203a1e0fa5d33e6905e4adb9c816a4" exitCode=1 Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.253547 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"364999bd64808358fe001acc7a8515bd3d203a1e0fa5d33e6905e4adb9c816a4"} Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.254141 4650 scope.go:117] "RemoveContainer" containerID="364999bd64808358fe001acc7a8515bd3d203a1e0fa5d33e6905e4adb9c816a4" Feb 01 07:43:43 crc kubenswrapper[4650]: E0201 07:43:43.254352 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.254599 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.254621 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 01 07:43:43 crc kubenswrapper[4650]: I0201 07:43:43.312782 4650 scope.go:117] "RemoveContainer" containerID="8fe9f906bb9af0f6e1c2ba42ba5f4922543588a5973cdd095151c3b67117ce78" Feb 01 07:43:44 crc kubenswrapper[4650]: I0201 07:43:44.016945 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5b4d45c6bd-qsdbt" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.150:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.150:8443: connect: connection refused" Feb 01 07:43:44 crc kubenswrapper[4650]: I0201 07:43:44.262649 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d22003a-32cd-46b8-96b7-dfff2b7a5762","Type":"ContainerStarted","Data":"921c28e9f835584d55104eb01830162add4a770634bee059a7a54c7e2fee8952"} Feb 01 07:43:44 crc kubenswrapper[4650]: I0201 07:43:44.265984 4650 scope.go:117] "RemoveContainer" containerID="364999bd64808358fe001acc7a8515bd3d203a1e0fa5d33e6905e4adb9c816a4" Feb 01 07:43:44 crc kubenswrapper[4650]: E0201 07:43:44.266657 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:43:45 crc kubenswrapper[4650]: I0201 07:43:45.288382 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d22003a-32cd-46b8-96b7-dfff2b7a5762","Type":"ContainerStarted","Data":"d7ba9c9bf87f9db3a1b561b5f9f82c1cfac14bca311e3569801925507143517b"} Feb 01 07:43:45 crc kubenswrapper[4650]: I0201 07:43:45.485765 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 01 07:43:45 crc kubenswrapper[4650]: I0201 07:43:45.485877 4650 prober_manager.go:312] "Failed to trigger a manual run" 
probe="Readiness" Feb 01 07:43:45 crc kubenswrapper[4650]: I0201 07:43:45.515582 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 01 07:43:45 crc kubenswrapper[4650]: I0201 07:43:45.799810 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:43:45 crc kubenswrapper[4650]: I0201 07:43:45.800579 4650 scope.go:117] "RemoveContainer" containerID="364999bd64808358fe001acc7a8515bd3d203a1e0fa5d33e6905e4adb9c816a4" Feb 01 07:43:45 crc kubenswrapper[4650]: E0201 07:43:45.800852 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:43:47 crc kubenswrapper[4650]: I0201 07:43:47.307549 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d22003a-32cd-46b8-96b7-dfff2b7a5762","Type":"ContainerStarted","Data":"f9989d5ba0c2e94409688a62cc9a4a7b122f5bf84cd2e3fc0a7eef9943071a74"} Feb 01 07:43:47 crc kubenswrapper[4650]: I0201 07:43:47.307959 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 01 07:43:47 crc kubenswrapper[4650]: I0201 07:43:47.333369 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.660610876 podStartE2EDuration="6.333352083s" podCreationTimestamp="2026-02-01 07:43:41 +0000 UTC" firstStartedPulling="2026-02-01 07:43:42.303823663 +0000 UTC m=+1221.026921908" lastFinishedPulling="2026-02-01 07:43:46.97656487 +0000 UTC m=+1225.699663115" observedRunningTime="2026-02-01 07:43:47.325363582 +0000 UTC m=+1226.048461837" watchObservedRunningTime="2026-02-01 07:43:47.333352083 +0000 UTC m=+1226.056450328" Feb 01 07:43:48 crc kubenswrapper[4650]: I0201 07:43:48.807322 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.331505 4650 generic.go:334] "Generic (PLEG): container finished" podID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerID="121bf0c27d3cd2492d3454ae6a47181d459961964a1aefdd883d489176849870" exitCode=137 Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.331760 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b4d45c6bd-qsdbt" event={"ID":"7e572f25-ea86-45a7-b828-214b813f9d0c","Type":"ContainerDied","Data":"121bf0c27d3cd2492d3454ae6a47181d459961964a1aefdd883d489176849870"} Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.372330 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.372561 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="ceilometer-central-agent" containerID="cri-o://70c1bd073df5fe82e3d4d2531121f154ac237b568356d8e148d72261c50d916a" gracePeriod=30 Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.372929 4650 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/ceilometer-0" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="proxy-httpd" containerID="cri-o://f9989d5ba0c2e94409688a62cc9a4a7b122f5bf84cd2e3fc0a7eef9943071a74" gracePeriod=30 Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.372970 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="sg-core" containerID="cri-o://d7ba9c9bf87f9db3a1b561b5f9f82c1cfac14bca311e3569801925507143517b" gracePeriod=30 Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.373003 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="ceilometer-notification-agent" containerID="cri-o://921c28e9f835584d55104eb01830162add4a770634bee059a7a54c7e2fee8952" gracePeriod=30 Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.577081 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.663276 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-horizon-secret-key\") pod \"7e572f25-ea86-45a7-b828-214b813f9d0c\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.663707 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xs5js\" (UniqueName: \"kubernetes.io/projected/7e572f25-ea86-45a7-b828-214b813f9d0c-kube-api-access-xs5js\") pod \"7e572f25-ea86-45a7-b828-214b813f9d0c\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.663847 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e572f25-ea86-45a7-b828-214b813f9d0c-scripts\") pod \"7e572f25-ea86-45a7-b828-214b813f9d0c\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.664070 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-combined-ca-bundle\") pod \"7e572f25-ea86-45a7-b828-214b813f9d0c\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.664187 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-horizon-tls-certs\") pod \"7e572f25-ea86-45a7-b828-214b813f9d0c\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.664361 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e572f25-ea86-45a7-b828-214b813f9d0c-logs\") pod \"7e572f25-ea86-45a7-b828-214b813f9d0c\" (UID: \"7e572f25-ea86-45a7-b828-214b813f9d0c\") " Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.664488 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e572f25-ea86-45a7-b828-214b813f9d0c-config-data\") pod \"7e572f25-ea86-45a7-b828-214b813f9d0c\" (UID: 
\"7e572f25-ea86-45a7-b828-214b813f9d0c\") " Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.664611 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e572f25-ea86-45a7-b828-214b813f9d0c-logs" (OuterVolumeSpecName: "logs") pod "7e572f25-ea86-45a7-b828-214b813f9d0c" (UID: "7e572f25-ea86-45a7-b828-214b813f9d0c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.665144 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e572f25-ea86-45a7-b828-214b813f9d0c-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.670908 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "7e572f25-ea86-45a7-b828-214b813f9d0c" (UID: "7e572f25-ea86-45a7-b828-214b813f9d0c"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.673955 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e572f25-ea86-45a7-b828-214b813f9d0c-kube-api-access-xs5js" (OuterVolumeSpecName: "kube-api-access-xs5js") pod "7e572f25-ea86-45a7-b828-214b813f9d0c" (UID: "7e572f25-ea86-45a7-b828-214b813f9d0c"). InnerVolumeSpecName "kube-api-access-xs5js". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.695471 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e572f25-ea86-45a7-b828-214b813f9d0c-scripts" (OuterVolumeSpecName: "scripts") pod "7e572f25-ea86-45a7-b828-214b813f9d0c" (UID: "7e572f25-ea86-45a7-b828-214b813f9d0c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.698948 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e572f25-ea86-45a7-b828-214b813f9d0c-config-data" (OuterVolumeSpecName: "config-data") pod "7e572f25-ea86-45a7-b828-214b813f9d0c" (UID: "7e572f25-ea86-45a7-b828-214b813f9d0c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.710964 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7e572f25-ea86-45a7-b828-214b813f9d0c" (UID: "7e572f25-ea86-45a7-b828-214b813f9d0c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.724869 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "7e572f25-ea86-45a7-b828-214b813f9d0c" (UID: "7e572f25-ea86-45a7-b828-214b813f9d0c"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.767420 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.767451 4650 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.767460 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e572f25-ea86-45a7-b828-214b813f9d0c-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.767469 4650 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7e572f25-ea86-45a7-b828-214b813f9d0c-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.767479 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xs5js\" (UniqueName: \"kubernetes.io/projected/7e572f25-ea86-45a7-b828-214b813f9d0c-kube-api-access-xs5js\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.767490 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e572f25-ea86-45a7-b828-214b813f9d0c-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:49 crc kubenswrapper[4650]: I0201 07:43:49.805548 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:50 crc kubenswrapper[4650]: I0201 07:43:50.343240 4650 generic.go:334] "Generic (PLEG): container finished" podID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerID="f9989d5ba0c2e94409688a62cc9a4a7b122f5bf84cd2e3fc0a7eef9943071a74" exitCode=0 Feb 01 07:43:50 crc kubenswrapper[4650]: I0201 07:43:50.343482 4650 generic.go:334] "Generic (PLEG): container finished" podID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerID="d7ba9c9bf87f9db3a1b561b5f9f82c1cfac14bca311e3569801925507143517b" exitCode=2 Feb 01 07:43:50 crc kubenswrapper[4650]: I0201 07:43:50.343491 4650 generic.go:334] "Generic (PLEG): container finished" podID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerID="921c28e9f835584d55104eb01830162add4a770634bee059a7a54c7e2fee8952" exitCode=0 Feb 01 07:43:50 crc kubenswrapper[4650]: I0201 07:43:50.343310 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d22003a-32cd-46b8-96b7-dfff2b7a5762","Type":"ContainerDied","Data":"f9989d5ba0c2e94409688a62cc9a4a7b122f5bf84cd2e3fc0a7eef9943071a74"} Feb 01 07:43:50 crc kubenswrapper[4650]: I0201 07:43:50.343562 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d22003a-32cd-46b8-96b7-dfff2b7a5762","Type":"ContainerDied","Data":"d7ba9c9bf87f9db3a1b561b5f9f82c1cfac14bca311e3569801925507143517b"} Feb 01 07:43:50 crc kubenswrapper[4650]: I0201 07:43:50.343575 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"0d22003a-32cd-46b8-96b7-dfff2b7a5762","Type":"ContainerDied","Data":"921c28e9f835584d55104eb01830162add4a770634bee059a7a54c7e2fee8952"} Feb 01 07:43:50 crc kubenswrapper[4650]: I0201 07:43:50.345639 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b4d45c6bd-qsdbt" event={"ID":"7e572f25-ea86-45a7-b828-214b813f9d0c","Type":"ContainerDied","Data":"3991a1f5f7d6ff3bc69e81c8fcf18b986763834ea8fe5c0be7e359e7c44955dc"} Feb 01 07:43:50 crc kubenswrapper[4650]: I0201 07:43:50.345674 4650 scope.go:117] "RemoveContainer" containerID="a451bbea895b092fac95434c169fa8820f75c4dcfbd374cb478929932b3b5264" Feb 01 07:43:50 crc kubenswrapper[4650]: I0201 07:43:50.345725 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5b4d45c6bd-qsdbt" Feb 01 07:43:50 crc kubenswrapper[4650]: I0201 07:43:50.380101 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5b4d45c6bd-qsdbt"] Feb 01 07:43:50 crc kubenswrapper[4650]: I0201 07:43:50.385467 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5b4d45c6bd-qsdbt"] Feb 01 07:43:50 crc kubenswrapper[4650]: I0201 07:43:50.509276 4650 scope.go:117] "RemoveContainer" containerID="121bf0c27d3cd2492d3454ae6a47181d459961964a1aefdd883d489176849870" Feb 01 07:43:51 crc kubenswrapper[4650]: I0201 07:43:51.810303 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:51 crc kubenswrapper[4650]: I0201 07:43:51.976918 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" path="/var/lib/kubelet/pods/7e572f25-ea86-45a7-b828-214b813f9d0c/volumes" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.368305 4650 generic.go:334] "Generic (PLEG): container finished" podID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerID="70c1bd073df5fe82e3d4d2531121f154ac237b568356d8e148d72261c50d916a" exitCode=0 Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.368421 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d22003a-32cd-46b8-96b7-dfff2b7a5762","Type":"ContainerDied","Data":"70c1bd073df5fe82e3d4d2531121f154ac237b568356d8e148d72261c50d916a"} Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.577430 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.615757 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d22003a-32cd-46b8-96b7-dfff2b7a5762-run-httpd\") pod \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.616076 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-sg-core-conf-yaml\") pod \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.616128 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8d4pn\" (UniqueName: \"kubernetes.io/projected/0d22003a-32cd-46b8-96b7-dfff2b7a5762-kube-api-access-8d4pn\") pod \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.616157 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-config-data\") pod \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.616202 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d22003a-32cd-46b8-96b7-dfff2b7a5762-log-httpd\") pod \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.616231 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-scripts\") pod \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.616353 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-combined-ca-bundle\") pod \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\" (UID: \"0d22003a-32cd-46b8-96b7-dfff2b7a5762\") " Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.616423 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d22003a-32cd-46b8-96b7-dfff2b7a5762-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0d22003a-32cd-46b8-96b7-dfff2b7a5762" (UID: "0d22003a-32cd-46b8-96b7-dfff2b7a5762"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.616713 4650 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d22003a-32cd-46b8-96b7-dfff2b7a5762-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.617378 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d22003a-32cd-46b8-96b7-dfff2b7a5762-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0d22003a-32cd-46b8-96b7-dfff2b7a5762" (UID: "0d22003a-32cd-46b8-96b7-dfff2b7a5762"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.640742 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d22003a-32cd-46b8-96b7-dfff2b7a5762-kube-api-access-8d4pn" (OuterVolumeSpecName: "kube-api-access-8d4pn") pod "0d22003a-32cd-46b8-96b7-dfff2b7a5762" (UID: "0d22003a-32cd-46b8-96b7-dfff2b7a5762"). InnerVolumeSpecName "kube-api-access-8d4pn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.653293 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0d22003a-32cd-46b8-96b7-dfff2b7a5762" (UID: "0d22003a-32cd-46b8-96b7-dfff2b7a5762"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.655415 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-scripts" (OuterVolumeSpecName: "scripts") pod "0d22003a-32cd-46b8-96b7-dfff2b7a5762" (UID: "0d22003a-32cd-46b8-96b7-dfff2b7a5762"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.694908 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d22003a-32cd-46b8-96b7-dfff2b7a5762" (UID: "0d22003a-32cd-46b8-96b7-dfff2b7a5762"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.719535 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.720400 4650 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.720456 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8d4pn\" (UniqueName: \"kubernetes.io/projected/0d22003a-32cd-46b8-96b7-dfff2b7a5762-kube-api-access-8d4pn\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.720474 4650 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d22003a-32cd-46b8-96b7-dfff2b7a5762-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.720486 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.738953 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-config-data" (OuterVolumeSpecName: "config-data") pod "0d22003a-32cd-46b8-96b7-dfff2b7a5762" (UID: "0d22003a-32cd-46b8-96b7-dfff2b7a5762"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:52 crc kubenswrapper[4650]: I0201 07:43:52.821546 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d22003a-32cd-46b8-96b7-dfff2b7a5762-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.387432 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d22003a-32cd-46b8-96b7-dfff2b7a5762","Type":"ContainerDied","Data":"9d2d4a8bef995a397cb64230a46fb49cd6c20056a91f082dd5be968a3b213ddd"} Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.387505 4650 scope.go:117] "RemoveContainer" containerID="f9989d5ba0c2e94409688a62cc9a4a7b122f5bf84cd2e3fc0a7eef9943071a74" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.387631 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.427821 4650 scope.go:117] "RemoveContainer" containerID="d7ba9c9bf87f9db3a1b561b5f9f82c1cfac14bca311e3569801925507143517b" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.460448 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.465896 4650 scope.go:117] "RemoveContainer" containerID="921c28e9f835584d55104eb01830162add4a770634bee059a7a54c7e2fee8952" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.481832 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492067 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:53 crc kubenswrapper[4650]: E0201 07:43:53.492504 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="ceilometer-notification-agent" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492522 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="ceilometer-notification-agent" Feb 01 07:43:53 crc kubenswrapper[4650]: E0201 07:43:53.492536 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492542 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" Feb 01 07:43:53 crc kubenswrapper[4650]: E0201 07:43:53.492553 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="sg-core" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492559 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="sg-core" Feb 01 07:43:53 crc kubenswrapper[4650]: E0201 07:43:53.492576 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon-log" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492584 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon-log" Feb 01 07:43:53 crc kubenswrapper[4650]: E0201 07:43:53.492606 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" 
containerName="proxy-httpd" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492612 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="proxy-httpd" Feb 01 07:43:53 crc kubenswrapper[4650]: E0201 07:43:53.492628 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="ceilometer-central-agent" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492634 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="ceilometer-central-agent" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492800 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon-log" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492826 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="ceilometer-notification-agent" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492836 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492852 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="ceilometer-central-agent" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492862 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="sg-core" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492876 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" containerName="proxy-httpd" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.492884 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" Feb 01 07:43:53 crc kubenswrapper[4650]: E0201 07:43:53.493100 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.493139 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e572f25-ea86-45a7-b828-214b813f9d0c" containerName="horizon" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.494762 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.495414 4650 scope.go:117] "RemoveContainer" containerID="70c1bd073df5fe82e3d4d2531121f154ac237b568356d8e148d72261c50d916a" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.500944 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.501187 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.501429 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.534121 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ab92c14-2c1a-4176-b50f-61cf7eba5262-run-httpd\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.534203 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4lqq\" (UniqueName: \"kubernetes.io/projected/2ab92c14-2c1a-4176-b50f-61cf7eba5262-kube-api-access-j4lqq\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.534238 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.534264 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-config-data\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.534296 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-scripts\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.534343 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.534449 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ab92c14-2c1a-4176-b50f-61cf7eba5262-log-httpd\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.636377 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/2ab92c14-2c1a-4176-b50f-61cf7eba5262-log-httpd\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.636660 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ab92c14-2c1a-4176-b50f-61cf7eba5262-run-httpd\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.636716 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4lqq\" (UniqueName: \"kubernetes.io/projected/2ab92c14-2c1a-4176-b50f-61cf7eba5262-kube-api-access-j4lqq\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.636739 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.636756 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-config-data\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.636779 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-scripts\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.636811 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.637820 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ab92c14-2c1a-4176-b50f-61cf7eba5262-run-httpd\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.637984 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ab92c14-2c1a-4176-b50f-61cf7eba5262-log-httpd\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.641311 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-config-data\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.641571 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.641766 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.654803 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-scripts\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.668916 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4lqq\" (UniqueName: \"kubernetes.io/projected/2ab92c14-2c1a-4176-b50f-61cf7eba5262-kube-api-access-j4lqq\") pod \"ceilometer-0\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.823438 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:43:53 crc kubenswrapper[4650]: I0201 07:43:53.979635 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d22003a-32cd-46b8-96b7-dfff2b7a5762" path="/var/lib/kubelet/pods/0d22003a-32cd-46b8-96b7-dfff2b7a5762/volumes" Feb 01 07:43:54 crc kubenswrapper[4650]: I0201 07:43:54.299777 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:43:54 crc kubenswrapper[4650]: W0201 07:43:54.300650 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ab92c14_2c1a_4176_b50f_61cf7eba5262.slice/crio-429301d6eee109928a983d067c8e04b7480be48c56ed4fc217df66ef789f5ed1 WatchSource:0}: Error finding container 429301d6eee109928a983d067c8e04b7480be48c56ed4fc217df66ef789f5ed1: Status 404 returned error can't find the container with id 429301d6eee109928a983d067c8e04b7480be48c56ed4fc217df66ef789f5ed1 Feb 01 07:43:54 crc kubenswrapper[4650]: I0201 07:43:54.401201 4650 generic.go:334] "Generic (PLEG): container finished" podID="364e7c65-e9d5-4a41-b87b-62b8da17e636" containerID="14dad6be3549ceffb86a1653b9ed525a1992ac37c1e02f90a17d34bf5db4a5ff" exitCode=0 Feb 01 07:43:54 crc kubenswrapper[4650]: I0201 07:43:54.401251 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-9hm6l" event={"ID":"364e7c65-e9d5-4a41-b87b-62b8da17e636","Type":"ContainerDied","Data":"14dad6be3549ceffb86a1653b9ed525a1992ac37c1e02f90a17d34bf5db4a5ff"} Feb 01 07:43:54 crc kubenswrapper[4650]: I0201 07:43:54.404911 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ab92c14-2c1a-4176-b50f-61cf7eba5262","Type":"ContainerStarted","Data":"429301d6eee109928a983d067c8e04b7480be48c56ed4fc217df66ef789f5ed1"} Feb 01 07:43:54 crc kubenswrapper[4650]: I0201 07:43:54.805704 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:54 crc 
kubenswrapper[4650]: I0201 07:43:54.806344 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:54 crc kubenswrapper[4650]: I0201 07:43:54.806540 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:43:54 crc kubenswrapper[4650]: I0201 07:43:54.807856 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"3253537655fa707d10b86cd13bca8974885a06833d526a0a78a516111a7bdcc2"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 07:43:54 crc kubenswrapper[4650]: I0201 07:43:54.808323 4650 scope.go:117] "RemoveContainer" containerID="364999bd64808358fe001acc7a8515bd3d203a1e0fa5d33e6905e4adb9c816a4" Feb 01 07:43:54 crc kubenswrapper[4650]: I0201 07:43:54.808676 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://3253537655fa707d10b86cd13bca8974885a06833d526a0a78a516111a7bdcc2" gracePeriod=30 Feb 01 07:43:54 crc kubenswrapper[4650]: I0201 07:43:54.812065 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:43:54 crc kubenswrapper[4650]: E0201 07:43:54.934624 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:43:54 crc kubenswrapper[4650]: I0201 07:43:54.969459 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:43:54 crc kubenswrapper[4650]: I0201 07:43:54.969841 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:43:54 crc kubenswrapper[4650]: I0201 07:43:54.969963 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:43:54 crc kubenswrapper[4650]: E0201 07:43:54.971004 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed 
container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.436098 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="3253537655fa707d10b86cd13bca8974885a06833d526a0a78a516111a7bdcc2" exitCode=0 Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.436237 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"3253537655fa707d10b86cd13bca8974885a06833d526a0a78a516111a7bdcc2"} Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.436277 4650 scope.go:117] "RemoveContainer" containerID="e2b400407f3c9b656bd37f6ac6b6e597a65c3873e1eddea4b784083639dafeec" Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.439040 4650 scope.go:117] "RemoveContainer" containerID="3253537655fa707d10b86cd13bca8974885a06833d526a0a78a516111a7bdcc2" Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.439102 4650 scope.go:117] "RemoveContainer" containerID="364999bd64808358fe001acc7a8515bd3d203a1e0fa5d33e6905e4adb9c816a4" Feb 01 07:43:55 crc kubenswrapper[4650]: E0201 07:43:55.439596 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.446200 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ab92c14-2c1a-4176-b50f-61cf7eba5262","Type":"ContainerStarted","Data":"5f3e9cbda5a45d6320c870b27f0563d68c90cb100f2f10c214a82f0efa2e3c4a"} Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.860737 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.889911 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-config-data\") pod \"364e7c65-e9d5-4a41-b87b-62b8da17e636\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.890041 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8xws\" (UniqueName: \"kubernetes.io/projected/364e7c65-e9d5-4a41-b87b-62b8da17e636-kube-api-access-h8xws\") pod \"364e7c65-e9d5-4a41-b87b-62b8da17e636\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.890080 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-scripts\") pod \"364e7c65-e9d5-4a41-b87b-62b8da17e636\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.890276 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-combined-ca-bundle\") pod \"364e7c65-e9d5-4a41-b87b-62b8da17e636\" (UID: \"364e7c65-e9d5-4a41-b87b-62b8da17e636\") " Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.900195 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/364e7c65-e9d5-4a41-b87b-62b8da17e636-kube-api-access-h8xws" (OuterVolumeSpecName: "kube-api-access-h8xws") pod "364e7c65-e9d5-4a41-b87b-62b8da17e636" (UID: "364e7c65-e9d5-4a41-b87b-62b8da17e636"). InnerVolumeSpecName "kube-api-access-h8xws". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.903981 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-scripts" (OuterVolumeSpecName: "scripts") pod "364e7c65-e9d5-4a41-b87b-62b8da17e636" (UID: "364e7c65-e9d5-4a41-b87b-62b8da17e636"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.926923 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-config-data" (OuterVolumeSpecName: "config-data") pod "364e7c65-e9d5-4a41-b87b-62b8da17e636" (UID: "364e7c65-e9d5-4a41-b87b-62b8da17e636"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.930586 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "364e7c65-e9d5-4a41-b87b-62b8da17e636" (UID: "364e7c65-e9d5-4a41-b87b-62b8da17e636"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.992538 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8xws\" (UniqueName: \"kubernetes.io/projected/364e7c65-e9d5-4a41-b87b-62b8da17e636-kube-api-access-h8xws\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.992575 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.992587 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:55 crc kubenswrapper[4650]: I0201 07:43:55.992600 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/364e7c65-e9d5-4a41-b87b-62b8da17e636-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.462757 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-9hm6l" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.463767 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-9hm6l" event={"ID":"364e7c65-e9d5-4a41-b87b-62b8da17e636","Type":"ContainerDied","Data":"4fa78124ab6510f2f49fe3d173dab67295c3b5f60974417416d742eff4d2e128"} Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.463863 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4fa78124ab6510f2f49fe3d173dab67295c3b5f60974417416d742eff4d2e128" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.471084 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ab92c14-2c1a-4176-b50f-61cf7eba5262","Type":"ContainerStarted","Data":"e370b0f0d90b7331ac30094df1ab161531b300ad6e6711d1f08d6d242b5cfb41"} Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.536181 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 01 07:43:56 crc kubenswrapper[4650]: E0201 07:43:56.536610 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="364e7c65-e9d5-4a41-b87b-62b8da17e636" containerName="nova-cell0-conductor-db-sync" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.536640 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="364e7c65-e9d5-4a41-b87b-62b8da17e636" containerName="nova-cell0-conductor-db-sync" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.536833 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="364e7c65-e9d5-4a41-b87b-62b8da17e636" containerName="nova-cell0-conductor-db-sync" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.537439 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.540795 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-fw65n" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.541261 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.544059 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.602122 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9fc1633-ee89-4602-b737-a3644616841b-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"c9fc1633-ee89-4602-b737-a3644616841b\") " pod="openstack/nova-cell0-conductor-0" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.602182 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gw9q\" (UniqueName: \"kubernetes.io/projected/c9fc1633-ee89-4602-b737-a3644616841b-kube-api-access-9gw9q\") pod \"nova-cell0-conductor-0\" (UID: \"c9fc1633-ee89-4602-b737-a3644616841b\") " pod="openstack/nova-cell0-conductor-0" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.602366 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9fc1633-ee89-4602-b737-a3644616841b-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"c9fc1633-ee89-4602-b737-a3644616841b\") " pod="openstack/nova-cell0-conductor-0" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.704135 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9fc1633-ee89-4602-b737-a3644616841b-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"c9fc1633-ee89-4602-b737-a3644616841b\") " pod="openstack/nova-cell0-conductor-0" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.704363 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9fc1633-ee89-4602-b737-a3644616841b-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"c9fc1633-ee89-4602-b737-a3644616841b\") " pod="openstack/nova-cell0-conductor-0" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.704444 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gw9q\" (UniqueName: \"kubernetes.io/projected/c9fc1633-ee89-4602-b737-a3644616841b-kube-api-access-9gw9q\") pod \"nova-cell0-conductor-0\" (UID: \"c9fc1633-ee89-4602-b737-a3644616841b\") " pod="openstack/nova-cell0-conductor-0" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.708006 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9fc1633-ee89-4602-b737-a3644616841b-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"c9fc1633-ee89-4602-b737-a3644616841b\") " pod="openstack/nova-cell0-conductor-0" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.709473 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9fc1633-ee89-4602-b737-a3644616841b-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"c9fc1633-ee89-4602-b737-a3644616841b\") " pod="openstack/nova-cell0-conductor-0" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.720802 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gw9q\" (UniqueName: \"kubernetes.io/projected/c9fc1633-ee89-4602-b737-a3644616841b-kube-api-access-9gw9q\") pod \"nova-cell0-conductor-0\" (UID: \"c9fc1633-ee89-4602-b737-a3644616841b\") " pod="openstack/nova-cell0-conductor-0" Feb 01 07:43:56 crc kubenswrapper[4650]: I0201 07:43:56.864068 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Feb 01 07:43:57 crc kubenswrapper[4650]: W0201 07:43:57.355474 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9fc1633_ee89_4602_b737_a3644616841b.slice/crio-3a21c5fc469716233e4d67eb9825c4963f1cac0595fa43c1c8d1ac419bec1b17 WatchSource:0}: Error finding container 3a21c5fc469716233e4d67eb9825c4963f1cac0595fa43c1c8d1ac419bec1b17: Status 404 returned error can't find the container with id 3a21c5fc469716233e4d67eb9825c4963f1cac0595fa43c1c8d1ac419bec1b17 Feb 01 07:43:57 crc kubenswrapper[4650]: I0201 07:43:57.366543 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 01 07:43:57 crc kubenswrapper[4650]: I0201 07:43:57.483324 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"c9fc1633-ee89-4602-b737-a3644616841b","Type":"ContainerStarted","Data":"3a21c5fc469716233e4d67eb9825c4963f1cac0595fa43c1c8d1ac419bec1b17"} Feb 01 07:43:57 crc kubenswrapper[4650]: I0201 07:43:57.486060 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ab92c14-2c1a-4176-b50f-61cf7eba5262","Type":"ContainerStarted","Data":"233e62c466d08d7aca551e5781f4b5f18d77d820f939af334da749f0f4034456"} Feb 01 07:43:57 crc kubenswrapper[4650]: I0201 07:43:57.832427 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="5ae063fc-da05-4f12-96aa-ea13d37dc9d0" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.157:9292/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 01 07:43:57 crc kubenswrapper[4650]: I0201 07:43:57.832441 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="5ae063fc-da05-4f12-96aa-ea13d37dc9d0" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.157:9292/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 01 07:43:58 crc kubenswrapper[4650]: I0201 07:43:58.502447 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"c9fc1633-ee89-4602-b737-a3644616841b","Type":"ContainerStarted","Data":"dc559eec3016fd23e9535b5582b2baac4e7e40d1ea56e68e13cb425e6536d08b"} Feb 01 07:43:58 crc kubenswrapper[4650]: I0201 07:43:58.502611 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Feb 01 07:43:58 crc kubenswrapper[4650]: I0201 07:43:58.522714 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.522695707 podStartE2EDuration="2.522695707s" 
podCreationTimestamp="2026-02-01 07:43:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:43:58.516053653 +0000 UTC m=+1237.239151908" watchObservedRunningTime="2026-02-01 07:43:58.522695707 +0000 UTC m=+1237.245793962" Feb 01 07:43:59 crc kubenswrapper[4650]: I0201 07:43:59.512853 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ab92c14-2c1a-4176-b50f-61cf7eba5262","Type":"ContainerStarted","Data":"acb2326861b642936430b58f6eb55495b8a5f2fde3cf157c7e1129a460b267fc"} Feb 01 07:43:59 crc kubenswrapper[4650]: I0201 07:43:59.542137 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.322491383 podStartE2EDuration="6.542113484s" podCreationTimestamp="2026-02-01 07:43:53 +0000 UTC" firstStartedPulling="2026-02-01 07:43:54.302904571 +0000 UTC m=+1233.026002816" lastFinishedPulling="2026-02-01 07:43:58.522526652 +0000 UTC m=+1237.245624917" observedRunningTime="2026-02-01 07:43:59.537149584 +0000 UTC m=+1238.260247829" watchObservedRunningTime="2026-02-01 07:43:59.542113484 +0000 UTC m=+1238.265211749" Feb 01 07:44:00 crc kubenswrapper[4650]: I0201 07:44:00.521828 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 01 07:44:06 crc kubenswrapper[4650]: I0201 07:44:06.905453 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.535448 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-kd98m"] Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.536764 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.553606 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.553895 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.620138 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-kd98m"] Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.689116 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-config-data\") pod \"nova-cell0-cell-mapping-kd98m\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.689170 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-kd98m\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.689198 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-scripts\") pod \"nova-cell0-cell-mapping-kd98m\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.689214 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8fdn\" (UniqueName: \"kubernetes.io/projected/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-kube-api-access-l8fdn\") pod \"nova-cell0-cell-mapping-kd98m\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.734458 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.735993 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.742937 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.753457 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.790659 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-config-data\") pod \"nova-cell0-cell-mapping-kd98m\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.790726 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-kd98m\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.790767 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-scripts\") pod \"nova-cell0-cell-mapping-kd98m\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.790788 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8fdn\" (UniqueName: \"kubernetes.io/projected/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-kube-api-access-l8fdn\") pod \"nova-cell0-cell-mapping-kd98m\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.815516 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-scripts\") pod \"nova-cell0-cell-mapping-kd98m\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.815598 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-kd98m\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.818551 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-config-data\") pod \"nova-cell0-cell-mapping-kd98m\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.824446 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.825554 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.831706 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.852574 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8fdn\" (UniqueName: \"kubernetes.io/projected/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-kube-api-access-l8fdn\") pod \"nova-cell0-cell-mapping-kd98m\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.869404 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.885174 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.900459 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-config-data\") pod \"nova-api-0\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " pod="openstack/nova-api-0" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.900532 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51d39239-82ba-4408-a37b-c183f8e9fdea-logs\") pod \"nova-api-0\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " pod="openstack/nova-api-0" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.900578 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " pod="openstack/nova-api-0" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.900597 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s54kq\" (UniqueName: \"kubernetes.io/projected/51d39239-82ba-4408-a37b-c183f8e9fdea-kube-api-access-s54kq\") pod \"nova-api-0\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " pod="openstack/nova-api-0" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.979057 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.979144 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.979255 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:44:07 crc kubenswrapper[4650]: E0201 07:44:07.979735 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to 
\"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.981207 4650 scope.go:117] "RemoveContainer" containerID="3253537655fa707d10b86cd13bca8974885a06833d526a0a78a516111a7bdcc2" Feb 01 07:44:07 crc kubenswrapper[4650]: I0201 07:44:07.981238 4650 scope.go:117] "RemoveContainer" containerID="364999bd64808358fe001acc7a8515bd3d203a1e0fa5d33e6905e4adb9c816a4" Feb 01 07:44:07 crc kubenswrapper[4650]: E0201 07:44:07.981490 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.001265 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.003372 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-config-data\") pod \"nova-api-0\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " pod="openstack/nova-api-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.003445 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5w6z\" (UniqueName: \"kubernetes.io/projected/a1ad86d5-4286-47cd-899c-7c5ec57112ab-kube-api-access-g5w6z\") pod \"nova-scheduler-0\" (UID: \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.003486 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51d39239-82ba-4408-a37b-c183f8e9fdea-logs\") pod \"nova-api-0\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " pod="openstack/nova-api-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.003512 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1ad86d5-4286-47cd-899c-7c5ec57112ab-config-data\") pod \"nova-scheduler-0\" (UID: \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.003542 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1ad86d5-4286-47cd-899c-7c5ec57112ab-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.003574 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"51d39239-82ba-4408-a37b-c183f8e9fdea\") " pod="openstack/nova-api-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.003594 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s54kq\" (UniqueName: \"kubernetes.io/projected/51d39239-82ba-4408-a37b-c183f8e9fdea-kube-api-access-s54kq\") pod \"nova-api-0\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " pod="openstack/nova-api-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.008643 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.011910 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51d39239-82ba-4408-a37b-c183f8e9fdea-logs\") pod \"nova-api-0\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " pod="openstack/nova-api-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.012735 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.018535 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.026905 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " pod="openstack/nova-api-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.027576 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-config-data\") pod \"nova-api-0\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " pod="openstack/nova-api-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.066600 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s54kq\" (UniqueName: \"kubernetes.io/projected/51d39239-82ba-4408-a37b-c183f8e9fdea-kube-api-access-s54kq\") pod \"nova-api-0\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " pod="openstack/nova-api-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.096395 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.097532 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.105552 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.107096 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5w6z\" (UniqueName: \"kubernetes.io/projected/a1ad86d5-4286-47cd-899c-7c5ec57112ab-kube-api-access-g5w6z\") pod \"nova-scheduler-0\" (UID: \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.107156 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1ad86d5-4286-47cd-899c-7c5ec57112ab-config-data\") pod \"nova-scheduler-0\" (UID: \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.107200 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1ad86d5-4286-47cd-899c-7c5ec57112ab-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.120111 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1ad86d5-4286-47cd-899c-7c5ec57112ab-config-data\") pod \"nova-scheduler-0\" (UID: \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.120697 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1ad86d5-4286-47cd-899c-7c5ec57112ab-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.124010 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.158529 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5w6z\" (UniqueName: \"kubernetes.io/projected/a1ad86d5-4286-47cd-899c-7c5ec57112ab-kube-api-access-g5w6z\") pod \"nova-scheduler-0\" (UID: \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.207267 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c6ccb6797-25srx"] Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.208772 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/969948e5-54d3-4e61-bc18-e3e499c96582-config-data\") pod \"nova-metadata-0\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.208817 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6876de01-6095-4fe4-a799-37444b455a82-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"6876de01-6095-4fe4-a799-37444b455a82\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:08 crc kubenswrapper[4650]: 
I0201 07:44:08.208839 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2wz5\" (UniqueName: \"kubernetes.io/projected/969948e5-54d3-4e61-bc18-e3e499c96582-kube-api-access-m2wz5\") pod \"nova-metadata-0\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.208865 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/969948e5-54d3-4e61-bc18-e3e499c96582-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.208919 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqjsj\" (UniqueName: \"kubernetes.io/projected/6876de01-6095-4fe4-a799-37444b455a82-kube-api-access-cqjsj\") pod \"nova-cell1-novncproxy-0\" (UID: \"6876de01-6095-4fe4-a799-37444b455a82\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.208967 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/969948e5-54d3-4e61-bc18-e3e499c96582-logs\") pod \"nova-metadata-0\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.208994 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6876de01-6095-4fe4-a799-37444b455a82-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"6876de01-6095-4fe4-a799-37444b455a82\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.211059 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.261824 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c6ccb6797-25srx"] Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.310287 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/969948e5-54d3-4e61-bc18-e3e499c96582-logs\") pod \"nova-metadata-0\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.310330 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-dns-svc\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.310438 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6876de01-6095-4fe4-a799-37444b455a82-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"6876de01-6095-4fe4-a799-37444b455a82\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.310509 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-ovsdbserver-nb\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.310538 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/969948e5-54d3-4e61-bc18-e3e499c96582-config-data\") pod \"nova-metadata-0\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.310568 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6876de01-6095-4fe4-a799-37444b455a82-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"6876de01-6095-4fe4-a799-37444b455a82\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.310589 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2wz5\" (UniqueName: \"kubernetes.io/projected/969948e5-54d3-4e61-bc18-e3e499c96582-kube-api-access-m2wz5\") pod \"nova-metadata-0\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.310609 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/969948e5-54d3-4e61-bc18-e3e499c96582-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.310646 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-ovsdbserver-sb\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: 
\"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.310663 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqjsj\" (UniqueName: \"kubernetes.io/projected/6876de01-6095-4fe4-a799-37444b455a82-kube-api-access-cqjsj\") pod \"nova-cell1-novncproxy-0\" (UID: \"6876de01-6095-4fe4-a799-37444b455a82\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.310686 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-config\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.310708 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmqr9\" (UniqueName: \"kubernetes.io/projected/474466cd-43fb-4e2f-8d45-c782ece71569-kube-api-access-vmqr9\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.311056 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/969948e5-54d3-4e61-bc18-e3e499c96582-logs\") pod \"nova-metadata-0\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.317278 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6876de01-6095-4fe4-a799-37444b455a82-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"6876de01-6095-4fe4-a799-37444b455a82\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.318079 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6876de01-6095-4fe4-a799-37444b455a82-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"6876de01-6095-4fe4-a799-37444b455a82\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.323643 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.325160 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/969948e5-54d3-4e61-bc18-e3e499c96582-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.326566 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/969948e5-54d3-4e61-bc18-e3e499c96582-config-data\") pod \"nova-metadata-0\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.334945 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2wz5\" (UniqueName: \"kubernetes.io/projected/969948e5-54d3-4e61-bc18-e3e499c96582-kube-api-access-m2wz5\") pod \"nova-metadata-0\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.339969 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqjsj\" (UniqueName: \"kubernetes.io/projected/6876de01-6095-4fe4-a799-37444b455a82-kube-api-access-cqjsj\") pod \"nova-cell1-novncproxy-0\" (UID: \"6876de01-6095-4fe4-a799-37444b455a82\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.353474 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.396865 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.412538 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-ovsdbserver-nb\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.412752 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-ovsdbserver-sb\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.412785 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-config\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.412809 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmqr9\" (UniqueName: \"kubernetes.io/projected/474466cd-43fb-4e2f-8d45-c782ece71569-kube-api-access-vmqr9\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.412849 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-dns-svc\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.413840 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-ovsdbserver-sb\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.414369 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-ovsdbserver-nb\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.414523 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-dns-svc\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.414874 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-config\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.434304 4650 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmqr9\" (UniqueName: \"kubernetes.io/projected/474466cd-43fb-4e2f-8d45-c782ece71569-kube-api-access-vmqr9\") pod \"dnsmasq-dns-7c6ccb6797-25srx\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.445506 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.582799 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:08 crc kubenswrapper[4650]: I0201 07:44:08.752517 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-kd98m"] Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.220886 4650 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.234399 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.412772 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-jzxjl"] Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.413847 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.427403 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.427679 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.428059 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-jzxjl"] Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.536462 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrmrc\" (UniqueName: \"kubernetes.io/projected/471ec131-07c2-4fd8-a63a-e36c42859d92-kube-api-access-xrmrc\") pod \"nova-cell1-conductor-db-sync-jzxjl\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.536547 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-jzxjl\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.536617 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-config-data\") pod \"nova-cell1-conductor-db-sync-jzxjl\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.536678 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-scripts\") pod \"nova-cell1-conductor-db-sync-jzxjl\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.558410 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.580017 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.590727 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c6ccb6797-25srx"] Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.601755 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.638481 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-config-data\") pod \"nova-cell1-conductor-db-sync-jzxjl\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.638776 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-scripts\") pod \"nova-cell1-conductor-db-sync-jzxjl\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.642061 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrmrc\" (UniqueName: \"kubernetes.io/projected/471ec131-07c2-4fd8-a63a-e36c42859d92-kube-api-access-xrmrc\") pod \"nova-cell1-conductor-db-sync-jzxjl\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.642227 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-config-data\") pod \"nova-cell1-conductor-db-sync-jzxjl\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.643097 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-jzxjl\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.647227 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-scripts\") pod \"nova-cell1-conductor-db-sync-jzxjl\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.649515 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-jzxjl\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " 
pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.656087 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kd98m" event={"ID":"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277","Type":"ContainerStarted","Data":"7d4409e31e756b8c82c24433231b02179bb889b7eac8403081f27f4d1c6b222c"} Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.656126 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kd98m" event={"ID":"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277","Type":"ContainerStarted","Data":"76babe4a9ce719498f55d3a30a5b6fdeb7d5bc9746e7a930fdf96466d5026088"} Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.658660 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"6876de01-6095-4fe4-a799-37444b455a82","Type":"ContainerStarted","Data":"70d766ce5870942f7aa18d497e486ac891f00574c4b45cfa3c739dbdec2650bd"} Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.661124 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" event={"ID":"474466cd-43fb-4e2f-8d45-c782ece71569","Type":"ContainerStarted","Data":"e652a96a124f57241cd4fa60d7612c4fb0d988d4d4dfa818cb0916f5526bcebe"} Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.675743 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"969948e5-54d3-4e61-bc18-e3e499c96582","Type":"ContainerStarted","Data":"61da7878c3c95f3e200a266d3e89c99d979052b963772a0b0732ce1dd685b5d2"} Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.677155 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a1ad86d5-4286-47cd-899c-7c5ec57112ab","Type":"ContainerStarted","Data":"ccff01b3310f7ab05840d16a1106275eeed06dae28e7ab488aca9a66ae9495ec"} Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.680169 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"51d39239-82ba-4408-a37b-c183f8e9fdea","Type":"ContainerStarted","Data":"ce4e248b37018c4b5489319623fde53cf9c46cdc04ec70b8e1fa84d95ddf016d"} Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.684901 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-kd98m" podStartSLOduration=2.684883719 podStartE2EDuration="2.684883719s" podCreationTimestamp="2026-02-01 07:44:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:44:09.673333467 +0000 UTC m=+1248.396431732" watchObservedRunningTime="2026-02-01 07:44:09.684883719 +0000 UTC m=+1248.407981964" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.688532 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrmrc\" (UniqueName: \"kubernetes.io/projected/471ec131-07c2-4fd8-a63a-e36c42859d92-kube-api-access-xrmrc\") pod \"nova-cell1-conductor-db-sync-jzxjl\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:09 crc kubenswrapper[4650]: I0201 07:44:09.742072 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:10 crc kubenswrapper[4650]: I0201 07:44:10.290684 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-jzxjl"] Feb 01 07:44:10 crc kubenswrapper[4650]: I0201 07:44:10.700276 4650 generic.go:334] "Generic (PLEG): container finished" podID="474466cd-43fb-4e2f-8d45-c782ece71569" containerID="e9f82b81836da56347f6486bdc715b67ef24b6ab809038dc3b3d1ec85df65201" exitCode=0 Feb 01 07:44:10 crc kubenswrapper[4650]: I0201 07:44:10.700705 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" event={"ID":"474466cd-43fb-4e2f-8d45-c782ece71569","Type":"ContainerDied","Data":"e9f82b81836da56347f6486bdc715b67ef24b6ab809038dc3b3d1ec85df65201"} Feb 01 07:44:10 crc kubenswrapper[4650]: I0201 07:44:10.708445 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-jzxjl" event={"ID":"471ec131-07c2-4fd8-a63a-e36c42859d92","Type":"ContainerStarted","Data":"18495f3abfa0c95d7e094f37ab6c91e86a400a889cc0915b7d5efccf03247642"} Feb 01 07:44:10 crc kubenswrapper[4650]: I0201 07:44:10.747220 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-jzxjl" podStartSLOduration=1.7471952979999998 podStartE2EDuration="1.747195298s" podCreationTimestamp="2026-02-01 07:44:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:44:10.737050233 +0000 UTC m=+1249.460148478" watchObservedRunningTime="2026-02-01 07:44:10.747195298 +0000 UTC m=+1249.470293553" Feb 01 07:44:11 crc kubenswrapper[4650]: I0201 07:44:11.421513 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 01 07:44:11 crc kubenswrapper[4650]: I0201 07:44:11.444037 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:11 crc kubenswrapper[4650]: I0201 07:44:11.739972 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="6f66535bdcc720ca6331a25502406a022cc11d5deb1d240c85548ae491d10847" exitCode=1 Feb 01 07:44:11 crc kubenswrapper[4650]: I0201 07:44:11.740066 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"6f66535bdcc720ca6331a25502406a022cc11d5deb1d240c85548ae491d10847"} Feb 01 07:44:11 crc kubenswrapper[4650]: I0201 07:44:11.740849 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:44:11 crc kubenswrapper[4650]: I0201 07:44:11.740918 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:44:11 crc kubenswrapper[4650]: I0201 07:44:11.740940 4650 scope.go:117] "RemoveContainer" containerID="6f66535bdcc720ca6331a25502406a022cc11d5deb1d240c85548ae491d10847" Feb 01 07:44:11 crc kubenswrapper[4650]: I0201 07:44:11.741014 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:44:11 crc kubenswrapper[4650]: I0201 07:44:11.744306 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-jzxjl" 
event={"ID":"471ec131-07c2-4fd8-a63a-e36c42859d92","Type":"ContainerStarted","Data":"be31af16eaf0b1e0f020f9f1e29766b14c92b7dba820b53b47f4ebfc058c0e05"} Feb 01 07:44:13 crc kubenswrapper[4650]: E0201 07:44:13.232535 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.763131 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" event={"ID":"474466cd-43fb-4e2f-8d45-c782ece71569","Type":"ContainerStarted","Data":"d20d45fcbd61e76a263e790789888efdf66c9aa7477c86b0bd53356884706e37"} Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.763517 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.766232 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"969948e5-54d3-4e61-bc18-e3e499c96582","Type":"ContainerStarted","Data":"1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492"} Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.766328 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="969948e5-54d3-4e61-bc18-e3e499c96582" containerName="nova-metadata-log" containerID="cri-o://78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a" gracePeriod=30 Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.766355 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="969948e5-54d3-4e61-bc18-e3e499c96582" containerName="nova-metadata-metadata" containerID="cri-o://1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492" gracePeriod=30 Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.766444 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"969948e5-54d3-4e61-bc18-e3e499c96582","Type":"ContainerStarted","Data":"78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a"} Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.773702 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a1ad86d5-4286-47cd-899c-7c5ec57112ab","Type":"ContainerStarted","Data":"5c22ef4e8bf773054d60d296e03a78fe3cd47e9f78694c1d99fd5bd8b0eedefa"} Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.780369 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"50273097fa73190c557fb1b573c67450a63d38490679debfafff88dd70648dc6"} Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.780861 4650 scope.go:117] "RemoveContainer" 
containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.780915 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.781001 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:44:13 crc kubenswrapper[4650]: E0201 07:44:13.781265 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.783745 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" podStartSLOduration=5.783725831 podStartE2EDuration="5.783725831s" podCreationTimestamp="2026-02-01 07:44:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:44:13.78142387 +0000 UTC m=+1252.504522125" watchObservedRunningTime="2026-02-01 07:44:13.783725831 +0000 UTC m=+1252.506824076" Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.784324 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"51d39239-82ba-4408-a37b-c183f8e9fdea","Type":"ContainerStarted","Data":"29d2a0b9802f4ccdfd166dcda223ee7acbd4fcd64e73643ff9b06682cb17ed4e"} Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.784375 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"51d39239-82ba-4408-a37b-c183f8e9fdea","Type":"ContainerStarted","Data":"c4d7d51f16e0e5294a5d616dc0fd929e9d535d2ec34bcdbf7db8377559868859"} Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.785848 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"6876de01-6095-4fe4-a799-37444b455a82","Type":"ContainerStarted","Data":"2da597a32f0a46d324d9340fc57a17fe507ac7fa3133ce14d350b1ad1709332b"} Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.785928 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="6876de01-6095-4fe4-a799-37444b455a82" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://2da597a32f0a46d324d9340fc57a17fe507ac7fa3133ce14d350b1ad1709332b" gracePeriod=30 Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.847142 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.523002053 podStartE2EDuration="6.846932684s" podCreationTimestamp="2026-02-01 07:44:07 +0000 UTC" firstStartedPulling="2026-02-01 07:44:09.556761167 +0000 UTC m=+1248.279859412" lastFinishedPulling="2026-02-01 07:44:12.880691798 +0000 
UTC m=+1251.603790043" observedRunningTime="2026-02-01 07:44:13.841143533 +0000 UTC m=+1252.564241778" watchObservedRunningTime="2026-02-01 07:44:13.846932684 +0000 UTC m=+1252.570030929" Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.870611 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.599633178 podStartE2EDuration="6.870592513s" podCreationTimestamp="2026-02-01 07:44:07 +0000 UTC" firstStartedPulling="2026-02-01 07:44:09.600856281 +0000 UTC m=+1248.323954516" lastFinishedPulling="2026-02-01 07:44:12.871815606 +0000 UTC m=+1251.594913851" observedRunningTime="2026-02-01 07:44:13.861069354 +0000 UTC m=+1252.584167599" watchObservedRunningTime="2026-02-01 07:44:13.870592513 +0000 UTC m=+1252.593690758" Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.881760 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.590858438 podStartE2EDuration="6.881745465s" podCreationTimestamp="2026-02-01 07:44:07 +0000 UTC" firstStartedPulling="2026-02-01 07:44:09.584372609 +0000 UTC m=+1248.307470854" lastFinishedPulling="2026-02-01 07:44:12.875259636 +0000 UTC m=+1251.598357881" observedRunningTime="2026-02-01 07:44:13.878917091 +0000 UTC m=+1252.602015336" watchObservedRunningTime="2026-02-01 07:44:13.881745465 +0000 UTC m=+1252.604843710" Feb 01 07:44:13 crc kubenswrapper[4650]: I0201 07:44:13.897753 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.245185516 podStartE2EDuration="5.897737543s" podCreationTimestamp="2026-02-01 07:44:08 +0000 UTC" firstStartedPulling="2026-02-01 07:44:09.220621044 +0000 UTC m=+1247.943719289" lastFinishedPulling="2026-02-01 07:44:12.873173071 +0000 UTC m=+1251.596271316" observedRunningTime="2026-02-01 07:44:13.894724934 +0000 UTC m=+1252.617823179" watchObservedRunningTime="2026-02-01 07:44:13.897737543 +0000 UTC m=+1252.620835788" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.699149 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.797144 4650 generic.go:334] "Generic (PLEG): container finished" podID="969948e5-54d3-4e61-bc18-e3e499c96582" containerID="1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492" exitCode=0 Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.797403 4650 generic.go:334] "Generic (PLEG): container finished" podID="969948e5-54d3-4e61-bc18-e3e499c96582" containerID="78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a" exitCode=143 Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.797211 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.797191 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"969948e5-54d3-4e61-bc18-e3e499c96582","Type":"ContainerDied","Data":"1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492"} Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.797485 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"969948e5-54d3-4e61-bc18-e3e499c96582","Type":"ContainerDied","Data":"78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a"} Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.797499 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"969948e5-54d3-4e61-bc18-e3e499c96582","Type":"ContainerDied","Data":"61da7878c3c95f3e200a266d3e89c99d979052b963772a0b0732ce1dd685b5d2"} Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.797515 4650 scope.go:117] "RemoveContainer" containerID="1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.817466 4650 scope.go:117] "RemoveContainer" containerID="78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.832953 4650 scope.go:117] "RemoveContainer" containerID="1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492" Feb 01 07:44:14 crc kubenswrapper[4650]: E0201 07:44:14.833411 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492\": container with ID starting with 1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492 not found: ID does not exist" containerID="1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.833461 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492"} err="failed to get container status \"1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492\": rpc error: code = NotFound desc = could not find container \"1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492\": container with ID starting with 1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492 not found: ID does not exist" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.833495 4650 scope.go:117] "RemoveContainer" containerID="78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a" Feb 01 07:44:14 crc kubenswrapper[4650]: E0201 07:44:14.833799 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a\": container with ID starting with 78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a not found: ID does not exist" containerID="78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.833837 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a"} err="failed to get container status \"78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a\": rpc error: code = 
NotFound desc = could not find container \"78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a\": container with ID starting with 78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a not found: ID does not exist" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.833855 4650 scope.go:117] "RemoveContainer" containerID="1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.834085 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492"} err="failed to get container status \"1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492\": rpc error: code = NotFound desc = could not find container \"1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492\": container with ID starting with 1e48a9d113ad903912a2aeb95ca09b3ce5ecb482d6c375534dff38733e6b5492 not found: ID does not exist" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.834113 4650 scope.go:117] "RemoveContainer" containerID="78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.834308 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a"} err="failed to get container status \"78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a\": rpc error: code = NotFound desc = could not find container \"78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a\": container with ID starting with 78322b6c215cd37eb806bdf28d1e5344c21f0b01bc5e606a02720aa6afe64b0a not found: ID does not exist" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.897438 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/969948e5-54d3-4e61-bc18-e3e499c96582-combined-ca-bundle\") pod \"969948e5-54d3-4e61-bc18-e3e499c96582\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.897612 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/969948e5-54d3-4e61-bc18-e3e499c96582-logs\") pod \"969948e5-54d3-4e61-bc18-e3e499c96582\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.897680 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2wz5\" (UniqueName: \"kubernetes.io/projected/969948e5-54d3-4e61-bc18-e3e499c96582-kube-api-access-m2wz5\") pod \"969948e5-54d3-4e61-bc18-e3e499c96582\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.897771 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/969948e5-54d3-4e61-bc18-e3e499c96582-config-data\") pod \"969948e5-54d3-4e61-bc18-e3e499c96582\" (UID: \"969948e5-54d3-4e61-bc18-e3e499c96582\") " Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.898316 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/969948e5-54d3-4e61-bc18-e3e499c96582-logs" (OuterVolumeSpecName: "logs") pod "969948e5-54d3-4e61-bc18-e3e499c96582" (UID: "969948e5-54d3-4e61-bc18-e3e499c96582"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.899851 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/969948e5-54d3-4e61-bc18-e3e499c96582-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.921713 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/969948e5-54d3-4e61-bc18-e3e499c96582-kube-api-access-m2wz5" (OuterVolumeSpecName: "kube-api-access-m2wz5") pod "969948e5-54d3-4e61-bc18-e3e499c96582" (UID: "969948e5-54d3-4e61-bc18-e3e499c96582"). InnerVolumeSpecName "kube-api-access-m2wz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.927549 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/969948e5-54d3-4e61-bc18-e3e499c96582-config-data" (OuterVolumeSpecName: "config-data") pod "969948e5-54d3-4e61-bc18-e3e499c96582" (UID: "969948e5-54d3-4e61-bc18-e3e499c96582"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:14 crc kubenswrapper[4650]: I0201 07:44:14.936768 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/969948e5-54d3-4e61-bc18-e3e499c96582-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "969948e5-54d3-4e61-bc18-e3e499c96582" (UID: "969948e5-54d3-4e61-bc18-e3e499c96582"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.000870 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/969948e5-54d3-4e61-bc18-e3e499c96582-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.000906 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2wz5\" (UniqueName: \"kubernetes.io/projected/969948e5-54d3-4e61-bc18-e3e499c96582-kube-api-access-m2wz5\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.000918 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/969948e5-54d3-4e61-bc18-e3e499c96582-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.128074 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.137044 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.153115 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:15 crc kubenswrapper[4650]: E0201 07:44:15.153481 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="969948e5-54d3-4e61-bc18-e3e499c96582" containerName="nova-metadata-log" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.153496 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="969948e5-54d3-4e61-bc18-e3e499c96582" containerName="nova-metadata-log" Feb 01 07:44:15 crc kubenswrapper[4650]: E0201 07:44:15.153518 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="969948e5-54d3-4e61-bc18-e3e499c96582" containerName="nova-metadata-metadata" Feb 01 07:44:15 crc 
kubenswrapper[4650]: I0201 07:44:15.153524 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="969948e5-54d3-4e61-bc18-e3e499c96582" containerName="nova-metadata-metadata" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.153695 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="969948e5-54d3-4e61-bc18-e3e499c96582" containerName="nova-metadata-metadata" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.153712 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="969948e5-54d3-4e61-bc18-e3e499c96582" containerName="nova-metadata-log" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.154615 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.158128 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.159532 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.191896 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.313017 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.313183 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/154566db-5f09-4112-a592-433b8ea8bd5c-logs\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.313382 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.313430 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-config-data\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.313477 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n95tb\" (UniqueName: \"kubernetes.io/projected/154566db-5f09-4112-a592-433b8ea8bd5c-kube-api-access-n95tb\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.414725 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 
07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.414990 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-config-data\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.415125 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n95tb\" (UniqueName: \"kubernetes.io/projected/154566db-5f09-4112-a592-433b8ea8bd5c-kube-api-access-n95tb\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.415250 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.415338 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/154566db-5f09-4112-a592-433b8ea8bd5c-logs\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.415701 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/154566db-5f09-4112-a592-433b8ea8bd5c-logs\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.423898 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.423995 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-config-data\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.424286 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.441682 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n95tb\" (UniqueName: \"kubernetes.io/projected/154566db-5f09-4112-a592-433b8ea8bd5c-kube-api-access-n95tb\") pod \"nova-metadata-0\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.469556 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:44:15 crc kubenswrapper[4650]: I0201 07:44:15.974737 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="969948e5-54d3-4e61-bc18-e3e499c96582" path="/var/lib/kubelet/pods/969948e5-54d3-4e61-bc18-e3e499c96582/volumes" Feb 01 07:44:16 crc kubenswrapper[4650]: W0201 07:44:16.039400 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod154566db_5f09_4112_a592_433b8ea8bd5c.slice/crio-817d9b7a19fb2402319a4442989c5b897b1eb02e222354bbb88882f5cb99777a WatchSource:0}: Error finding container 817d9b7a19fb2402319a4442989c5b897b1eb02e222354bbb88882f5cb99777a: Status 404 returned error can't find the container with id 817d9b7a19fb2402319a4442989c5b897b1eb02e222354bbb88882f5cb99777a Feb 01 07:44:16 crc kubenswrapper[4650]: I0201 07:44:16.056545 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:16 crc kubenswrapper[4650]: I0201 07:44:16.817292 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"154566db-5f09-4112-a592-433b8ea8bd5c","Type":"ContainerStarted","Data":"1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141"} Feb 01 07:44:16 crc kubenswrapper[4650]: I0201 07:44:16.817614 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"154566db-5f09-4112-a592-433b8ea8bd5c","Type":"ContainerStarted","Data":"d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc"} Feb 01 07:44:16 crc kubenswrapper[4650]: I0201 07:44:16.817627 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"154566db-5f09-4112-a592-433b8ea8bd5c","Type":"ContainerStarted","Data":"817d9b7a19fb2402319a4442989c5b897b1eb02e222354bbb88882f5cb99777a"} Feb 01 07:44:16 crc kubenswrapper[4650]: I0201 07:44:16.836628 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.836611781 podStartE2EDuration="1.836611781s" podCreationTimestamp="2026-02-01 07:44:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:44:16.836206561 +0000 UTC m=+1255.559304806" watchObservedRunningTime="2026-02-01 07:44:16.836611781 +0000 UTC m=+1255.559710026" Feb 01 07:44:17 crc kubenswrapper[4650]: I0201 07:44:17.833786 4650 generic.go:334] "Generic (PLEG): container finished" podID="fabe1af1-e17e-4cb8-9b5f-0def0d4ff277" containerID="7d4409e31e756b8c82c24433231b02179bb889b7eac8403081f27f4d1c6b222c" exitCode=0 Feb 01 07:44:17 crc kubenswrapper[4650]: I0201 07:44:17.833884 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kd98m" event={"ID":"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277","Type":"ContainerDied","Data":"7d4409e31e756b8c82c24433231b02179bb889b7eac8403081f27f4d1c6b222c"} Feb 01 07:44:18 crc kubenswrapper[4650]: I0201 07:44:18.324857 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Feb 01 07:44:18 crc kubenswrapper[4650]: I0201 07:44:18.325187 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Feb 01 07:44:18 crc kubenswrapper[4650]: I0201 07:44:18.355591 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 01 07:44:18 crc 
kubenswrapper[4650]: I0201 07:44:18.355644 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 01 07:44:18 crc kubenswrapper[4650]: I0201 07:44:18.358765 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Feb 01 07:44:18 crc kubenswrapper[4650]: I0201 07:44:18.447158 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:18 crc kubenswrapper[4650]: I0201 07:44:18.584156 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:44:18 crc kubenswrapper[4650]: I0201 07:44:18.674424 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77c9c856fc-k7lkj"] Feb 01 07:44:18 crc kubenswrapper[4650]: I0201 07:44:18.674666 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" podUID="4f5171ae-f202-4f51-a05b-c13e7136959c" containerName="dnsmasq-dns" containerID="cri-o://b590b8fe5f73e6c43905b4cc248783bff820334eca133a956c67355237676a1b" gracePeriod=10 Feb 01 07:44:18 crc kubenswrapper[4650]: I0201 07:44:18.848184 4650 generic.go:334] "Generic (PLEG): container finished" podID="4f5171ae-f202-4f51-a05b-c13e7136959c" containerID="b590b8fe5f73e6c43905b4cc248783bff820334eca133a956c67355237676a1b" exitCode=0 Feb 01 07:44:18 crc kubenswrapper[4650]: I0201 07:44:18.849237 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" event={"ID":"4f5171ae-f202-4f51-a05b-c13e7136959c","Type":"ContainerDied","Data":"b590b8fe5f73e6c43905b4cc248783bff820334eca133a956c67355237676a1b"} Feb 01 07:44:18 crc kubenswrapper[4650]: I0201 07:44:18.929239 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.288764 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.430780 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-ovsdbserver-nb\") pod \"4f5171ae-f202-4f51-a05b-c13e7136959c\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.430898 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-ovsdbserver-sb\") pod \"4f5171ae-f202-4f51-a05b-c13e7136959c\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.430980 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-config\") pod \"4f5171ae-f202-4f51-a05b-c13e7136959c\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.431009 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-dns-svc\") pod \"4f5171ae-f202-4f51-a05b-c13e7136959c\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.431056 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjz2c\" (UniqueName: \"kubernetes.io/projected/4f5171ae-f202-4f51-a05b-c13e7136959c-kube-api-access-hjz2c\") pod \"4f5171ae-f202-4f51-a05b-c13e7136959c\" (UID: \"4f5171ae-f202-4f51-a05b-c13e7136959c\") " Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.439379 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f5171ae-f202-4f51-a05b-c13e7136959c-kube-api-access-hjz2c" (OuterVolumeSpecName: "kube-api-access-hjz2c") pod "4f5171ae-f202-4f51-a05b-c13e7136959c" (UID: "4f5171ae-f202-4f51-a05b-c13e7136959c"). InnerVolumeSpecName "kube-api-access-hjz2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.439441 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="51d39239-82ba-4408-a37b-c183f8e9fdea" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.191:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.439788 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="51d39239-82ba-4408-a37b-c183f8e9fdea" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.191:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.494976 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.505578 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4f5171ae-f202-4f51-a05b-c13e7136959c" (UID: "4f5171ae-f202-4f51-a05b-c13e7136959c"). 
InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.505808 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4f5171ae-f202-4f51-a05b-c13e7136959c" (UID: "4f5171ae-f202-4f51-a05b-c13e7136959c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.533531 4650 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.533563 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjz2c\" (UniqueName: \"kubernetes.io/projected/4f5171ae-f202-4f51-a05b-c13e7136959c-kube-api-access-hjz2c\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.533576 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.543248 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4f5171ae-f202-4f51-a05b-c13e7136959c" (UID: "4f5171ae-f202-4f51-a05b-c13e7136959c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.558544 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-config" (OuterVolumeSpecName: "config") pod "4f5171ae-f202-4f51-a05b-c13e7136959c" (UID: "4f5171ae-f202-4f51-a05b-c13e7136959c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.635079 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-config-data\") pod \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.635348 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8fdn\" (UniqueName: \"kubernetes.io/projected/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-kube-api-access-l8fdn\") pod \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.635441 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-scripts\") pod \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.635521 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-combined-ca-bundle\") pod \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\" (UID: \"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277\") " Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.636351 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.636375 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f5171ae-f202-4f51-a05b-c13e7136959c-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.640966 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-scripts" (OuterVolumeSpecName: "scripts") pod "fabe1af1-e17e-4cb8-9b5f-0def0d4ff277" (UID: "fabe1af1-e17e-4cb8-9b5f-0def0d4ff277"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.641202 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-kube-api-access-l8fdn" (OuterVolumeSpecName: "kube-api-access-l8fdn") pod "fabe1af1-e17e-4cb8-9b5f-0def0d4ff277" (UID: "fabe1af1-e17e-4cb8-9b5f-0def0d4ff277"). InnerVolumeSpecName "kube-api-access-l8fdn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.662165 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fabe1af1-e17e-4cb8-9b5f-0def0d4ff277" (UID: "fabe1af1-e17e-4cb8-9b5f-0def0d4ff277"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.668530 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-config-data" (OuterVolumeSpecName: "config-data") pod "fabe1af1-e17e-4cb8-9b5f-0def0d4ff277" (UID: "fabe1af1-e17e-4cb8-9b5f-0def0d4ff277"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.738305 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.738337 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.738347 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.738355 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8fdn\" (UniqueName: \"kubernetes.io/projected/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277-kube-api-access-l8fdn\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.858633 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kd98m" event={"ID":"fabe1af1-e17e-4cb8-9b5f-0def0d4ff277","Type":"ContainerDied","Data":"76babe4a9ce719498f55d3a30a5b6fdeb7d5bc9746e7a930fdf96466d5026088"} Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.858912 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76babe4a9ce719498f55d3a30a5b6fdeb7d5bc9746e7a930fdf96466d5026088" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.859178 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kd98m" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.879454 4650 generic.go:334] "Generic (PLEG): container finished" podID="471ec131-07c2-4fd8-a63a-e36c42859d92" containerID="be31af16eaf0b1e0f020f9f1e29766b14c92b7dba820b53b47f4ebfc058c0e05" exitCode=0 Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.879541 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-jzxjl" event={"ID":"471ec131-07c2-4fd8-a63a-e36c42859d92","Type":"ContainerDied","Data":"be31af16eaf0b1e0f020f9f1e29766b14c92b7dba820b53b47f4ebfc058c0e05"} Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.885913 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.886277 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77c9c856fc-k7lkj" event={"ID":"4f5171ae-f202-4f51-a05b-c13e7136959c","Type":"ContainerDied","Data":"6426e85560160582cd934b0deca0ccfa532a219a19a0f534940752af16302637"} Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.886363 4650 scope.go:117] "RemoveContainer" containerID="b590b8fe5f73e6c43905b4cc248783bff820334eca133a956c67355237676a1b" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.946320 4650 scope.go:117] "RemoveContainer" containerID="9b8fd444628a9918940b0e1c1430c239ee56c8ad25b17dc99bbf6bdeba93b974" Feb 01 07:44:19 crc kubenswrapper[4650]: I0201 07:44:19.992114 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77c9c856fc-k7lkj"] Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.000124 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77c9c856fc-k7lkj"] Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.252797 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.253532 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="51d39239-82ba-4408-a37b-c183f8e9fdea" containerName="nova-api-log" containerID="cri-o://c4d7d51f16e0e5294a5d616dc0fd929e9d535d2ec34bcdbf7db8377559868859" gracePeriod=30 Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.254256 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="51d39239-82ba-4408-a37b-c183f8e9fdea" containerName="nova-api-api" containerID="cri-o://29d2a0b9802f4ccdfd166dcda223ee7acbd4fcd64e73643ff9b06682cb17ed4e" gracePeriod=30 Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.284899 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.285174 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="154566db-5f09-4112-a592-433b8ea8bd5c" containerName="nova-metadata-log" containerID="cri-o://d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc" gracePeriod=30 Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.285325 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="154566db-5f09-4112-a592-433b8ea8bd5c" containerName="nova-metadata-metadata" containerID="cri-o://1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141" gracePeriod=30 Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.470089 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.470131 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.741200 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.780064 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.895078 4650 generic.go:334] "Generic (PLEG): container finished" podID="51d39239-82ba-4408-a37b-c183f8e9fdea" containerID="c4d7d51f16e0e5294a5d616dc0fd929e9d535d2ec34bcdbf7db8377559868859" exitCode=143 Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.895140 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"51d39239-82ba-4408-a37b-c183f8e9fdea","Type":"ContainerDied","Data":"c4d7d51f16e0e5294a5d616dc0fd929e9d535d2ec34bcdbf7db8377559868859"} Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.897082 4650 generic.go:334] "Generic (PLEG): container finished" podID="154566db-5f09-4112-a592-433b8ea8bd5c" containerID="1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141" exitCode=0 Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.897115 4650 generic.go:334] "Generic (PLEG): container finished" podID="154566db-5f09-4112-a592-433b8ea8bd5c" containerID="d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc" exitCode=143 Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.897129 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.897218 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"154566db-5f09-4112-a592-433b8ea8bd5c","Type":"ContainerDied","Data":"1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141"} Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.897253 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"154566db-5f09-4112-a592-433b8ea8bd5c","Type":"ContainerDied","Data":"d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc"} Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.897264 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"154566db-5f09-4112-a592-433b8ea8bd5c","Type":"ContainerDied","Data":"817d9b7a19fb2402319a4442989c5b897b1eb02e222354bbb88882f5cb99777a"} Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.897280 4650 scope.go:117] "RemoveContainer" containerID="1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.924192 4650 scope.go:117] "RemoveContainer" containerID="d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.939895 4650 scope.go:117] "RemoveContainer" containerID="1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141" Feb 01 07:44:20 crc kubenswrapper[4650]: E0201 07:44:20.941643 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141\": container with ID starting with 1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141 not found: ID does not exist" containerID="1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.941675 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141"} err="failed to get container status \"1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141\": rpc error: code = 
NotFound desc = could not find container \"1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141\": container with ID starting with 1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141 not found: ID does not exist" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.941696 4650 scope.go:117] "RemoveContainer" containerID="d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc" Feb 01 07:44:20 crc kubenswrapper[4650]: E0201 07:44:20.941951 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc\": container with ID starting with d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc not found: ID does not exist" containerID="d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.941971 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc"} err="failed to get container status \"d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc\": rpc error: code = NotFound desc = could not find container \"d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc\": container with ID starting with d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc not found: ID does not exist" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.941984 4650 scope.go:117] "RemoveContainer" containerID="1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.942173 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141"} err="failed to get container status \"1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141\": rpc error: code = NotFound desc = could not find container \"1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141\": container with ID starting with 1222d705b88296101c543ad0bc51a1ec23d372009874d27578831b309ba9d141 not found: ID does not exist" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.942190 4650 scope.go:117] "RemoveContainer" containerID="d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.942365 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc"} err="failed to get container status \"d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc\": rpc error: code = NotFound desc = could not find container \"d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc\": container with ID starting with d27f1c605a8a5ba127c42ee5c17240ff98600328d109b7464a600906802393dc not found: ID does not exist" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.969004 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-config-data\") pod \"154566db-5f09-4112-a592-433b8ea8bd5c\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.969085 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n95tb\" (UniqueName: 
\"kubernetes.io/projected/154566db-5f09-4112-a592-433b8ea8bd5c-kube-api-access-n95tb\") pod \"154566db-5f09-4112-a592-433b8ea8bd5c\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.969141 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/154566db-5f09-4112-a592-433b8ea8bd5c-logs\") pod \"154566db-5f09-4112-a592-433b8ea8bd5c\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.969200 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-combined-ca-bundle\") pod \"154566db-5f09-4112-a592-433b8ea8bd5c\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.969318 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-nova-metadata-tls-certs\") pod \"154566db-5f09-4112-a592-433b8ea8bd5c\" (UID: \"154566db-5f09-4112-a592-433b8ea8bd5c\") " Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.969694 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/154566db-5f09-4112-a592-433b8ea8bd5c-logs" (OuterVolumeSpecName: "logs") pod "154566db-5f09-4112-a592-433b8ea8bd5c" (UID: "154566db-5f09-4112-a592-433b8ea8bd5c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:44:20 crc kubenswrapper[4650]: I0201 07:44:20.976017 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/154566db-5f09-4112-a592-433b8ea8bd5c-kube-api-access-n95tb" (OuterVolumeSpecName: "kube-api-access-n95tb") pod "154566db-5f09-4112-a592-433b8ea8bd5c" (UID: "154566db-5f09-4112-a592-433b8ea8bd5c"). InnerVolumeSpecName "kube-api-access-n95tb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.021036 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "154566db-5f09-4112-a592-433b8ea8bd5c" (UID: "154566db-5f09-4112-a592-433b8ea8bd5c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.036764 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-config-data" (OuterVolumeSpecName: "config-data") pod "154566db-5f09-4112-a592-433b8ea8bd5c" (UID: "154566db-5f09-4112-a592-433b8ea8bd5c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.072796 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/154566db-5f09-4112-a592-433b8ea8bd5c-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.072851 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.072864 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.072876 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n95tb\" (UniqueName: \"kubernetes.io/projected/154566db-5f09-4112-a592-433b8ea8bd5c-kube-api-access-n95tb\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.087702 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "154566db-5f09-4112-a592-433b8ea8bd5c" (UID: "154566db-5f09-4112-a592-433b8ea8bd5c"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.178360 4650 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/154566db-5f09-4112-a592-433b8ea8bd5c-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.236407 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.251822 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.252253 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.261128 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:21 crc kubenswrapper[4650]: E0201 07:44:21.261962 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fabe1af1-e17e-4cb8-9b5f-0def0d4ff277" containerName="nova-manage" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.262063 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="fabe1af1-e17e-4cb8-9b5f-0def0d4ff277" containerName="nova-manage" Feb 01 07:44:21 crc kubenswrapper[4650]: E0201 07:44:21.262145 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f5171ae-f202-4f51-a05b-c13e7136959c" containerName="dnsmasq-dns" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.262261 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f5171ae-f202-4f51-a05b-c13e7136959c" containerName="dnsmasq-dns" Feb 01 07:44:21 crc kubenswrapper[4650]: E0201 07:44:21.262361 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f5171ae-f202-4f51-a05b-c13e7136959c" containerName="init" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.262440 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f5171ae-f202-4f51-a05b-c13e7136959c" containerName="init" Feb 01 07:44:21 crc kubenswrapper[4650]: E0201 07:44:21.262517 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="471ec131-07c2-4fd8-a63a-e36c42859d92" containerName="nova-cell1-conductor-db-sync" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.262589 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="471ec131-07c2-4fd8-a63a-e36c42859d92" containerName="nova-cell1-conductor-db-sync" Feb 01 07:44:21 crc kubenswrapper[4650]: E0201 07:44:21.262687 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="154566db-5f09-4112-a592-433b8ea8bd5c" containerName="nova-metadata-metadata" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.262770 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="154566db-5f09-4112-a592-433b8ea8bd5c" containerName="nova-metadata-metadata" Feb 01 07:44:21 crc kubenswrapper[4650]: E0201 07:44:21.262849 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="154566db-5f09-4112-a592-433b8ea8bd5c" containerName="nova-metadata-log" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.262926 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="154566db-5f09-4112-a592-433b8ea8bd5c" containerName="nova-metadata-log" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.263230 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="154566db-5f09-4112-a592-433b8ea8bd5c" containerName="nova-metadata-log" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.263327 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f5171ae-f202-4f51-a05b-c13e7136959c" containerName="dnsmasq-dns" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.263389 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="471ec131-07c2-4fd8-a63a-e36c42859d92" containerName="nova-cell1-conductor-db-sync" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.263440 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="fabe1af1-e17e-4cb8-9b5f-0def0d4ff277" 
containerName="nova-manage" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.263506 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="154566db-5f09-4112-a592-433b8ea8bd5c" containerName="nova-metadata-metadata" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.264578 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.266940 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.268701 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.292104 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.384607 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-config-data\") pod \"471ec131-07c2-4fd8-a63a-e36c42859d92\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.384979 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-combined-ca-bundle\") pod \"471ec131-07c2-4fd8-a63a-e36c42859d92\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.385089 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrmrc\" (UniqueName: \"kubernetes.io/projected/471ec131-07c2-4fd8-a63a-e36c42859d92-kube-api-access-xrmrc\") pod \"471ec131-07c2-4fd8-a63a-e36c42859d92\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.385192 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-scripts\") pod \"471ec131-07c2-4fd8-a63a-e36c42859d92\" (UID: \"471ec131-07c2-4fd8-a63a-e36c42859d92\") " Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.385846 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbggq\" (UniqueName: \"kubernetes.io/projected/56a63441-07a6-4b3c-bee6-ccc803825470-kube-api-access-hbggq\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.385988 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.386115 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 
07:44:21.386250 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56a63441-07a6-4b3c-bee6-ccc803825470-logs\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.386380 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-config-data\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.390876 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/471ec131-07c2-4fd8-a63a-e36c42859d92-kube-api-access-xrmrc" (OuterVolumeSpecName: "kube-api-access-xrmrc") pod "471ec131-07c2-4fd8-a63a-e36c42859d92" (UID: "471ec131-07c2-4fd8-a63a-e36c42859d92"). InnerVolumeSpecName "kube-api-access-xrmrc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.397080 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-scripts" (OuterVolumeSpecName: "scripts") pod "471ec131-07c2-4fd8-a63a-e36c42859d92" (UID: "471ec131-07c2-4fd8-a63a-e36c42859d92"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.421181 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-config-data" (OuterVolumeSpecName: "config-data") pod "471ec131-07c2-4fd8-a63a-e36c42859d92" (UID: "471ec131-07c2-4fd8-a63a-e36c42859d92"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.446974 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "471ec131-07c2-4fd8-a63a-e36c42859d92" (UID: "471ec131-07c2-4fd8-a63a-e36c42859d92"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.487765 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbggq\" (UniqueName: \"kubernetes.io/projected/56a63441-07a6-4b3c-bee6-ccc803825470-kube-api-access-hbggq\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.487837 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.487859 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.487907 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56a63441-07a6-4b3c-bee6-ccc803825470-logs\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.487940 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-config-data\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.488013 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.488056 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrmrc\" (UniqueName: \"kubernetes.io/projected/471ec131-07c2-4fd8-a63a-e36c42859d92-kube-api-access-xrmrc\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.488068 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.488077 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/471ec131-07c2-4fd8-a63a-e36c42859d92-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.488659 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56a63441-07a6-4b3c-bee6-ccc803825470-logs\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.491459 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-nova-metadata-tls-certs\") pod 
\"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.491800 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.494616 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-config-data\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.506279 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbggq\" (UniqueName: \"kubernetes.io/projected/56a63441-07a6-4b3c-bee6-ccc803825470-kube-api-access-hbggq\") pod \"nova-metadata-0\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.588440 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.908569 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="a1ad86d5-4286-47cd-899c-7c5ec57112ab" containerName="nova-scheduler-scheduler" containerID="cri-o://5c22ef4e8bf773054d60d296e03a78fe3cd47e9f78694c1d99fd5bd8b0eedefa" gracePeriod=30 Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.909098 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-jzxjl" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.909255 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-jzxjl" event={"ID":"471ec131-07c2-4fd8-a63a-e36c42859d92","Type":"ContainerDied","Data":"18495f3abfa0c95d7e094f37ab6c91e86a400a889cc0915b7d5efccf03247642"} Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.909335 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="18495f3abfa0c95d7e094f37ab6c91e86a400a889cc0915b7d5efccf03247642" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.977252 4650 scope.go:117] "RemoveContainer" containerID="3253537655fa707d10b86cd13bca8974885a06833d526a0a78a516111a7bdcc2" Feb 01 07:44:21 crc kubenswrapper[4650]: I0201 07:44:21.977285 4650 scope.go:117] "RemoveContainer" containerID="364999bd64808358fe001acc7a8515bd3d203a1e0fa5d33e6905e4adb9c816a4" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.003668 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="154566db-5f09-4112-a592-433b8ea8bd5c" path="/var/lib/kubelet/pods/154566db-5f09-4112-a592-433b8ea8bd5c/volumes" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.007705 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f5171ae-f202-4f51-a05b-c13e7136959c" path="/var/lib/kubelet/pods/4f5171ae-f202-4f51-a05b-c13e7136959c/volumes" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.008612 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.013116 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.013235 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.018550 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.145928 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.206273 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74d70e84-b5e5-4146-a88f-23b2ece2f6f4-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"74d70e84-b5e5-4146-a88f-23b2ece2f6f4\") " pod="openstack/nova-cell1-conductor-0" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.206651 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74d70e84-b5e5-4146-a88f-23b2ece2f6f4-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"74d70e84-b5e5-4146-a88f-23b2ece2f6f4\") " pod="openstack/nova-cell1-conductor-0" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.206695 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpg96\" (UniqueName: \"kubernetes.io/projected/74d70e84-b5e5-4146-a88f-23b2ece2f6f4-kube-api-access-xpg96\") pod \"nova-cell1-conductor-0\" (UID: \"74d70e84-b5e5-4146-a88f-23b2ece2f6f4\") " pod="openstack/nova-cell1-conductor-0" Feb 01 07:44:22 crc kubenswrapper[4650]: E0201 07:44:22.287415 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.308356 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74d70e84-b5e5-4146-a88f-23b2ece2f6f4-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"74d70e84-b5e5-4146-a88f-23b2ece2f6f4\") " pod="openstack/nova-cell1-conductor-0" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.308545 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74d70e84-b5e5-4146-a88f-23b2ece2f6f4-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"74d70e84-b5e5-4146-a88f-23b2ece2f6f4\") " pod="openstack/nova-cell1-conductor-0" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.308630 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpg96\" (UniqueName: \"kubernetes.io/projected/74d70e84-b5e5-4146-a88f-23b2ece2f6f4-kube-api-access-xpg96\") pod \"nova-cell1-conductor-0\" (UID: \"74d70e84-b5e5-4146-a88f-23b2ece2f6f4\") " pod="openstack/nova-cell1-conductor-0" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.315130 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74d70e84-b5e5-4146-a88f-23b2ece2f6f4-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"74d70e84-b5e5-4146-a88f-23b2ece2f6f4\") " pod="openstack/nova-cell1-conductor-0" Feb 01 07:44:22 crc 
kubenswrapper[4650]: I0201 07:44:22.320867 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74d70e84-b5e5-4146-a88f-23b2ece2f6f4-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"74d70e84-b5e5-4146-a88f-23b2ece2f6f4\") " pod="openstack/nova-cell1-conductor-0" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.327509 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpg96\" (UniqueName: \"kubernetes.io/projected/74d70e84-b5e5-4146-a88f-23b2ece2f6f4-kube-api-access-xpg96\") pod \"nova-cell1-conductor-0\" (UID: \"74d70e84-b5e5-4146-a88f-23b2ece2f6f4\") " pod="openstack/nova-cell1-conductor-0" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.331755 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.798509 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.923609 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"63e7260b59a92b226b6ab4aa787fbd82123fc98b8f89bf546bd77d0c3883551e"} Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.923861 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.924310 4650 scope.go:117] "RemoveContainer" containerID="364999bd64808358fe001acc7a8515bd3d203a1e0fa5d33e6905e4adb9c816a4" Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.927298 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"74d70e84-b5e5-4146-a88f-23b2ece2f6f4","Type":"ContainerStarted","Data":"0c274eab82876cff4f2f589a3c84d304b145f86696790c3a9a6ff7ac6bdaa56c"} Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.931748 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"56a63441-07a6-4b3c-bee6-ccc803825470","Type":"ContainerStarted","Data":"4758c97cfe821a4579e3f979673ce7577fdd0c2a8bd533f026001a673d4b4a98"} Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.931778 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"56a63441-07a6-4b3c-bee6-ccc803825470","Type":"ContainerStarted","Data":"48fead3762663a5df81dacfcd7a376c3dc8691417335ae4a56681890a7faeecf"} Feb 01 07:44:22 crc kubenswrapper[4650]: I0201 07:44:22.931787 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"56a63441-07a6-4b3c-bee6-ccc803825470","Type":"ContainerStarted","Data":"863bf0ccf6fc2cf5d0f07ff353db639b4fc717c4258e6c8775c7196fe526bd8e"} Feb 01 07:44:23 crc kubenswrapper[4650]: I0201 07:44:23.076000 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.075980367 podStartE2EDuration="2.075980367s" podCreationTimestamp="2026-02-01 07:44:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:44:23.06194342 +0000 UTC m=+1261.785041665" watchObservedRunningTime="2026-02-01 07:44:23.075980367 +0000 UTC m=+1261.799078612" Feb 01 07:44:23 crc kubenswrapper[4650]: E0201 
07:44:23.326011 4650 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5c22ef4e8bf773054d60d296e03a78fe3cd47e9f78694c1d99fd5bd8b0eedefa" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 01 07:44:23 crc kubenswrapper[4650]: E0201 07:44:23.326893 4650 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5c22ef4e8bf773054d60d296e03a78fe3cd47e9f78694c1d99fd5bd8b0eedefa" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 01 07:44:23 crc kubenswrapper[4650]: E0201 07:44:23.327734 4650 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5c22ef4e8bf773054d60d296e03a78fe3cd47e9f78694c1d99fd5bd8b0eedefa" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 01 07:44:23 crc kubenswrapper[4650]: E0201 07:44:23.327763 4650 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="a1ad86d5-4286-47cd-899c-7c5ec57112ab" containerName="nova-scheduler-scheduler" Feb 01 07:44:23 crc kubenswrapper[4650]: I0201 07:44:23.835359 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Feb 01 07:44:23 crc kubenswrapper[4650]: I0201 07:44:23.943124 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"74d70e84-b5e5-4146-a88f-23b2ece2f6f4","Type":"ContainerStarted","Data":"81d543f6d4e40adb090d00a52fce16a3645088847bb3c2829d1d9c80cb76712e"} Feb 01 07:44:23 crc kubenswrapper[4650]: I0201 07:44:23.943249 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Feb 01 07:44:23 crc kubenswrapper[4650]: I0201 07:44:23.949514 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558"} Feb 01 07:44:23 crc kubenswrapper[4650]: I0201 07:44:23.980207 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.978569288 podStartE2EDuration="2.978569288s" podCreationTimestamp="2026-02-01 07:44:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:44:23.964388397 +0000 UTC m=+1262.687486652" watchObservedRunningTime="2026-02-01 07:44:23.978569288 +0000 UTC m=+1262.701667533" Feb 01 07:44:24 crc kubenswrapper[4650]: I0201 07:44:24.800254 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:44:24 crc kubenswrapper[4650]: I0201 07:44:24.960781 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" exitCode=1 Feb 01 07:44:24 crc kubenswrapper[4650]: I0201 07:44:24.961877 4650 scope.go:117] 
"RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:44:24 crc kubenswrapper[4650]: E0201 07:44:24.962066 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:44:24 crc kubenswrapper[4650]: I0201 07:44:24.962209 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558"} Feb 01 07:44:24 crc kubenswrapper[4650]: I0201 07:44:24.962242 4650 scope.go:117] "RemoveContainer" containerID="364999bd64808358fe001acc7a8515bd3d203a1e0fa5d33e6905e4adb9c816a4" Feb 01 07:44:24 crc kubenswrapper[4650]: I0201 07:44:24.966793 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:44:24 crc kubenswrapper[4650]: I0201 07:44:24.966949 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:44:24 crc kubenswrapper[4650]: I0201 07:44:24.967218 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:44:24 crc kubenswrapper[4650]: E0201 07:44:24.967767 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:44:25 crc kubenswrapper[4650]: I0201 07:44:25.981334 4650 generic.go:334] "Generic (PLEG): container finished" podID="a1ad86d5-4286-47cd-899c-7c5ec57112ab" containerID="5c22ef4e8bf773054d60d296e03a78fe3cd47e9f78694c1d99fd5bd8b0eedefa" exitCode=0 Feb 01 07:44:25 crc kubenswrapper[4650]: I0201 07:44:25.981401 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a1ad86d5-4286-47cd-899c-7c5ec57112ab","Type":"ContainerDied","Data":"5c22ef4e8bf773054d60d296e03a78fe3cd47e9f78694c1d99fd5bd8b0eedefa"} Feb 01 07:44:25 crc kubenswrapper[4650]: I0201 07:44:25.981640 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a1ad86d5-4286-47cd-899c-7c5ec57112ab","Type":"ContainerDied","Data":"ccff01b3310f7ab05840d16a1106275eeed06dae28e7ab488aca9a66ae9495ec"} Feb 01 07:44:25 crc kubenswrapper[4650]: I0201 07:44:25.981654 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ccff01b3310f7ab05840d16a1106275eeed06dae28e7ab488aca9a66ae9495ec" Feb 01 07:44:25 crc kubenswrapper[4650]: I0201 07:44:25.984672 4650 scope.go:117] 
"RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:44:25 crc kubenswrapper[4650]: E0201 07:44:25.984857 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:44:25 crc kubenswrapper[4650]: I0201 07:44:25.998052 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.112816 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1ad86d5-4286-47cd-899c-7c5ec57112ab-combined-ca-bundle\") pod \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\" (UID: \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\") " Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.112943 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1ad86d5-4286-47cd-899c-7c5ec57112ab-config-data\") pod \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\" (UID: \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\") " Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.112991 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5w6z\" (UniqueName: \"kubernetes.io/projected/a1ad86d5-4286-47cd-899c-7c5ec57112ab-kube-api-access-g5w6z\") pod \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\" (UID: \"a1ad86d5-4286-47cd-899c-7c5ec57112ab\") " Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.120713 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1ad86d5-4286-47cd-899c-7c5ec57112ab-kube-api-access-g5w6z" (OuterVolumeSpecName: "kube-api-access-g5w6z") pod "a1ad86d5-4286-47cd-899c-7c5ec57112ab" (UID: "a1ad86d5-4286-47cd-899c-7c5ec57112ab"). InnerVolumeSpecName "kube-api-access-g5w6z". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.158445 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1ad86d5-4286-47cd-899c-7c5ec57112ab-config-data" (OuterVolumeSpecName: "config-data") pod "a1ad86d5-4286-47cd-899c-7c5ec57112ab" (UID: "a1ad86d5-4286-47cd-899c-7c5ec57112ab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.162570 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1ad86d5-4286-47cd-899c-7c5ec57112ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a1ad86d5-4286-47cd-899c-7c5ec57112ab" (UID: "a1ad86d5-4286-47cd-899c-7c5ec57112ab"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.214654 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1ad86d5-4286-47cd-899c-7c5ec57112ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.214690 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1ad86d5-4286-47cd-899c-7c5ec57112ab-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.214701 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5w6z\" (UniqueName: \"kubernetes.io/projected/a1ad86d5-4286-47cd-899c-7c5ec57112ab-kube-api-access-g5w6z\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.589085 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.589364 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.993566 4650 generic.go:334] "Generic (PLEG): container finished" podID="51d39239-82ba-4408-a37b-c183f8e9fdea" containerID="29d2a0b9802f4ccdfd166dcda223ee7acbd4fcd64e73643ff9b06682cb17ed4e" exitCode=0 Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.994617 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.994158 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"51d39239-82ba-4408-a37b-c183f8e9fdea","Type":"ContainerDied","Data":"29d2a0b9802f4ccdfd166dcda223ee7acbd4fcd64e73643ff9b06682cb17ed4e"} Feb 01 07:44:26 crc kubenswrapper[4650]: I0201 07:44:26.994755 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 01 07:44:26 crc kubenswrapper[4650]: E0201 07:44:26.995098 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.023172 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.054248 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.075119 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:44:27 crc kubenswrapper[4650]: E0201 07:44:27.081278 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1ad86d5-4286-47cd-899c-7c5ec57112ab" containerName="nova-scheduler-scheduler" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.081368 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ad86d5-4286-47cd-899c-7c5ec57112ab" containerName="nova-scheduler-scheduler" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.081736 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1ad86d5-4286-47cd-899c-7c5ec57112ab" containerName="nova-scheduler-scheduler" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.082517 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.092898 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.095952 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.132444 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6915bfe3-bba1-4976-a7c0-18129dae5c0c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.132576 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62rxn\" (UniqueName: \"kubernetes.io/projected/6915bfe3-bba1-4976-a7c0-18129dae5c0c-kube-api-access-62rxn\") pod \"nova-scheduler-0\" (UID: \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.132621 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6915bfe3-bba1-4976-a7c0-18129dae5c0c-config-data\") pod \"nova-scheduler-0\" (UID: \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.236066 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62rxn\" (UniqueName: \"kubernetes.io/projected/6915bfe3-bba1-4976-a7c0-18129dae5c0c-kube-api-access-62rxn\") pod \"nova-scheduler-0\" (UID: 
\"6915bfe3-bba1-4976-a7c0-18129dae5c0c\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.236178 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6915bfe3-bba1-4976-a7c0-18129dae5c0c-config-data\") pod \"nova-scheduler-0\" (UID: \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.236230 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6915bfe3-bba1-4976-a7c0-18129dae5c0c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.241538 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6915bfe3-bba1-4976-a7c0-18129dae5c0c-config-data\") pod \"nova-scheduler-0\" (UID: \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.249788 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6915bfe3-bba1-4976-a7c0-18129dae5c0c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.259580 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62rxn\" (UniqueName: \"kubernetes.io/projected/6915bfe3-bba1-4976-a7c0-18129dae5c0c-kube-api-access-62rxn\") pod \"nova-scheduler-0\" (UID: \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\") " pod="openstack/nova-scheduler-0" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.434354 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.450098 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.645203 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-config-data\") pod \"51d39239-82ba-4408-a37b-c183f8e9fdea\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.645342 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51d39239-82ba-4408-a37b-c183f8e9fdea-logs\") pod \"51d39239-82ba-4408-a37b-c183f8e9fdea\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.645361 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-combined-ca-bundle\") pod \"51d39239-82ba-4408-a37b-c183f8e9fdea\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.645427 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s54kq\" (UniqueName: \"kubernetes.io/projected/51d39239-82ba-4408-a37b-c183f8e9fdea-kube-api-access-s54kq\") pod \"51d39239-82ba-4408-a37b-c183f8e9fdea\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.650428 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51d39239-82ba-4408-a37b-c183f8e9fdea-logs" (OuterVolumeSpecName: "logs") pod "51d39239-82ba-4408-a37b-c183f8e9fdea" (UID: "51d39239-82ba-4408-a37b-c183f8e9fdea"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.650518 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51d39239-82ba-4408-a37b-c183f8e9fdea-kube-api-access-s54kq" (OuterVolumeSpecName: "kube-api-access-s54kq") pod "51d39239-82ba-4408-a37b-c183f8e9fdea" (UID: "51d39239-82ba-4408-a37b-c183f8e9fdea"). InnerVolumeSpecName "kube-api-access-s54kq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:44:27 crc kubenswrapper[4650]: E0201 07:44:27.679499 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-combined-ca-bundle podName:51d39239-82ba-4408-a37b-c183f8e9fdea nodeName:}" failed. No retries permitted until 2026-02-01 07:44:28.17946427 +0000 UTC m=+1266.902562525 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-combined-ca-bundle") pod "51d39239-82ba-4408-a37b-c183f8e9fdea" (UID: "51d39239-82ba-4408-a37b-c183f8e9fdea") : error deleting /var/lib/kubelet/pods/51d39239-82ba-4408-a37b-c183f8e9fdea/volume-subpaths: remove /var/lib/kubelet/pods/51d39239-82ba-4408-a37b-c183f8e9fdea/volume-subpaths: no such file or directory Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.682490 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-config-data" (OuterVolumeSpecName: "config-data") pod "51d39239-82ba-4408-a37b-c183f8e9fdea" (UID: "51d39239-82ba-4408-a37b-c183f8e9fdea"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.701474 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.753580 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s54kq\" (UniqueName: \"kubernetes.io/projected/51d39239-82ba-4408-a37b-c183f8e9fdea-kube-api-access-s54kq\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.753776 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.753837 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51d39239-82ba-4408-a37b-c183f8e9fdea-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.800548 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.806247 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:44:27 crc kubenswrapper[4650]: I0201 07:44:27.975882 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1ad86d5-4286-47cd-899c-7c5ec57112ab" path="/var/lib/kubelet/pods/a1ad86d5-4286-47cd-899c-7c5ec57112ab/volumes" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.007746 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"51d39239-82ba-4408-a37b-c183f8e9fdea","Type":"ContainerDied","Data":"ce4e248b37018c4b5489319623fde53cf9c46cdc04ec70b8e1fa84d95ddf016d"} Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.007793 4650 scope.go:117] "RemoveContainer" containerID="29d2a0b9802f4ccdfd166dcda223ee7acbd4fcd64e73643ff9b06682cb17ed4e" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.007929 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.011700 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6915bfe3-bba1-4976-a7c0-18129dae5c0c","Type":"ContainerStarted","Data":"719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de"} Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.011747 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6915bfe3-bba1-4976-a7c0-18129dae5c0c","Type":"ContainerStarted","Data":"bd13734201924745e45c47fa93956563f9664e22d5faf1361439219d576b6652"} Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.012461 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:44:28 crc kubenswrapper[4650]: E0201 07:44:28.012733 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.019814 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.027501 4650 scope.go:117] "RemoveContainer" containerID="c4d7d51f16e0e5294a5d616dc0fd929e9d535d2ec34bcdbf7db8377559868859" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.035224 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.035205706 podStartE2EDuration="1.035205706s" podCreationTimestamp="2026-02-01 07:44:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:44:28.028042919 +0000 UTC m=+1266.751141174" watchObservedRunningTime="2026-02-01 07:44:28.035205706 +0000 UTC m=+1266.758303951" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.258663 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.259062 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf" containerName="kube-state-metrics" containerID="cri-o://7fb4f2f0723f3f8a4ab1fdbcce31f0ee44cb470a5e66f8afb6bdc902d42cfa27" gracePeriod=30 Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.264904 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-combined-ca-bundle\") pod \"51d39239-82ba-4408-a37b-c183f8e9fdea\" (UID: \"51d39239-82ba-4408-a37b-c183f8e9fdea\") " Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.270261 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "51d39239-82ba-4408-a37b-c183f8e9fdea" (UID: "51d39239-82ba-4408-a37b-c183f8e9fdea"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.368225 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51d39239-82ba-4408-a37b-c183f8e9fdea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.453013 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.466367 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.478815 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:28 crc kubenswrapper[4650]: E0201 07:44:28.479207 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51d39239-82ba-4408-a37b-c183f8e9fdea" containerName="nova-api-api" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.479222 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="51d39239-82ba-4408-a37b-c183f8e9fdea" containerName="nova-api-api" Feb 01 07:44:28 crc kubenswrapper[4650]: E0201 07:44:28.479234 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51d39239-82ba-4408-a37b-c183f8e9fdea" containerName="nova-api-log" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.479240 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="51d39239-82ba-4408-a37b-c183f8e9fdea" containerName="nova-api-log" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.479403 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="51d39239-82ba-4408-a37b-c183f8e9fdea" containerName="nova-api-log" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.479426 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="51d39239-82ba-4408-a37b-c183f8e9fdea" containerName="nova-api-api" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.480291 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.482746 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.501595 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.672254 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.672344 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-config-data\") pod \"nova-api-0\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.672372 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-logs\") pod \"nova-api-0\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.672446 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xshxj\" (UniqueName: \"kubernetes.io/projected/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-kube-api-access-xshxj\") pod \"nova-api-0\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.773533 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xshxj\" (UniqueName: \"kubernetes.io/projected/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-kube-api-access-xshxj\") pod \"nova-api-0\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.773698 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.773723 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-config-data\") pod \"nova-api-0\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.773747 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-logs\") pod \"nova-api-0\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.774129 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-logs\") pod \"nova-api-0\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " 
pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.777715 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-config-data\") pod \"nova-api-0\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.777897 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.789611 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xshxj\" (UniqueName: \"kubernetes.io/projected/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-kube-api-access-xshxj\") pod \"nova-api-0\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.792569 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.813217 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:44:28 crc kubenswrapper[4650]: I0201 07:44:28.977373 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8kjqv\" (UniqueName: \"kubernetes.io/projected/22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf-kube-api-access-8kjqv\") pod \"22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf\" (UID: \"22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf\") " Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:28.983658 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf-kube-api-access-8kjqv" (OuterVolumeSpecName: "kube-api-access-8kjqv") pod "22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf" (UID: "22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf"). InnerVolumeSpecName "kube-api-access-8kjqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.027377 4650 generic.go:334] "Generic (PLEG): container finished" podID="22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf" containerID="7fb4f2f0723f3f8a4ab1fdbcce31f0ee44cb470a5e66f8afb6bdc902d42cfa27" exitCode=2 Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.027442 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf","Type":"ContainerDied","Data":"7fb4f2f0723f3f8a4ab1fdbcce31f0ee44cb470a5e66f8afb6bdc902d42cfa27"} Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.027467 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf","Type":"ContainerDied","Data":"91ebfdceac486dfa12d0b8bf5ab5981a94bfa0f3ffa97147bba710599287763a"} Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.027470 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.027483 4650 scope.go:117] "RemoveContainer" containerID="7fb4f2f0723f3f8a4ab1fdbcce31f0ee44cb470a5e66f8afb6bdc902d42cfa27" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.064018 4650 scope.go:117] "RemoveContainer" containerID="7fb4f2f0723f3f8a4ab1fdbcce31f0ee44cb470a5e66f8afb6bdc902d42cfa27" Feb 01 07:44:29 crc kubenswrapper[4650]: E0201 07:44:29.064426 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7fb4f2f0723f3f8a4ab1fdbcce31f0ee44cb470a5e66f8afb6bdc902d42cfa27\": container with ID starting with 7fb4f2f0723f3f8a4ab1fdbcce31f0ee44cb470a5e66f8afb6bdc902d42cfa27 not found: ID does not exist" containerID="7fb4f2f0723f3f8a4ab1fdbcce31f0ee44cb470a5e66f8afb6bdc902d42cfa27" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.064461 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7fb4f2f0723f3f8a4ab1fdbcce31f0ee44cb470a5e66f8afb6bdc902d42cfa27"} err="failed to get container status \"7fb4f2f0723f3f8a4ab1fdbcce31f0ee44cb470a5e66f8afb6bdc902d42cfa27\": rpc error: code = NotFound desc = could not find container \"7fb4f2f0723f3f8a4ab1fdbcce31f0ee44cb470a5e66f8afb6bdc902d42cfa27\": container with ID starting with 7fb4f2f0723f3f8a4ab1fdbcce31f0ee44cb470a5e66f8afb6bdc902d42cfa27 not found: ID does not exist" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.080208 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.082305 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8kjqv\" (UniqueName: \"kubernetes.io/projected/22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf-kube-api-access-8kjqv\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.098707 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.109092 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Feb 01 07:44:29 crc kubenswrapper[4650]: E0201 07:44:29.109767 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf" containerName="kube-state-metrics" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.109782 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf" containerName="kube-state-metrics" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.109980 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf" containerName="kube-state-metrics" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.110558 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.112146 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.112484 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.130684 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.184416 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/1c77c7ff-72e7-4635-b2b8-2e523265c4ff-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"1c77c7ff-72e7-4635-b2b8-2e523265c4ff\") " pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.184723 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c77c7ff-72e7-4635-b2b8-2e523265c4ff-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"1c77c7ff-72e7-4635-b2b8-2e523265c4ff\") " pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.184777 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c77c7ff-72e7-4635-b2b8-2e523265c4ff-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"1c77c7ff-72e7-4635-b2b8-2e523265c4ff\") " pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.184992 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brj5n\" (UniqueName: \"kubernetes.io/projected/1c77c7ff-72e7-4635-b2b8-2e523265c4ff-kube-api-access-brj5n\") pod \"kube-state-metrics-0\" (UID: \"1c77c7ff-72e7-4635-b2b8-2e523265c4ff\") " pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.281440 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:29 crc kubenswrapper[4650]: W0201 07:44:29.283570 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d4897b8_cc8b_40cd_a14c_6ae66f5f606a.slice/crio-6b16e36c65efd3a9702222ac45fa3626468a2b2f2cc1d50aa8a5a00dd14fd6b1 WatchSource:0}: Error finding container 6b16e36c65efd3a9702222ac45fa3626468a2b2f2cc1d50aa8a5a00dd14fd6b1: Status 404 returned error can't find the container with id 6b16e36c65efd3a9702222ac45fa3626468a2b2f2cc1d50aa8a5a00dd14fd6b1 Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.285986 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/1c77c7ff-72e7-4635-b2b8-2e523265c4ff-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"1c77c7ff-72e7-4635-b2b8-2e523265c4ff\") " pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.286542 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c77c7ff-72e7-4635-b2b8-2e523265c4ff-combined-ca-bundle\") 
pod \"kube-state-metrics-0\" (UID: \"1c77c7ff-72e7-4635-b2b8-2e523265c4ff\") " pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.286670 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c77c7ff-72e7-4635-b2b8-2e523265c4ff-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"1c77c7ff-72e7-4635-b2b8-2e523265c4ff\") " pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.286728 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brj5n\" (UniqueName: \"kubernetes.io/projected/1c77c7ff-72e7-4635-b2b8-2e523265c4ff-kube-api-access-brj5n\") pod \"kube-state-metrics-0\" (UID: \"1c77c7ff-72e7-4635-b2b8-2e523265c4ff\") " pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.293908 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c77c7ff-72e7-4635-b2b8-2e523265c4ff-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"1c77c7ff-72e7-4635-b2b8-2e523265c4ff\") " pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.303863 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/1c77c7ff-72e7-4635-b2b8-2e523265c4ff-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"1c77c7ff-72e7-4635-b2b8-2e523265c4ff\") " pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.304271 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brj5n\" (UniqueName: \"kubernetes.io/projected/1c77c7ff-72e7-4635-b2b8-2e523265c4ff-kube-api-access-brj5n\") pod \"kube-state-metrics-0\" (UID: \"1c77c7ff-72e7-4635-b2b8-2e523265c4ff\") " pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.304448 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c77c7ff-72e7-4635-b2b8-2e523265c4ff-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"1c77c7ff-72e7-4635-b2b8-2e523265c4ff\") " pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.435551 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.806799 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.996550 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf" path="/var/lib/kubelet/pods/22fb10eb-bb45-474c-8a0e-4a1f73b8dbcf/volumes" Feb 01 07:44:29 crc kubenswrapper[4650]: I0201 07:44:29.997870 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51d39239-82ba-4408-a37b-c183f8e9fdea" path="/var/lib/kubelet/pods/51d39239-82ba-4408-a37b-c183f8e9fdea/volumes" Feb 01 07:44:30 crc kubenswrapper[4650]: I0201 07:44:30.030773 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 01 07:44:30 crc kubenswrapper[4650]: I0201 07:44:30.086168 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a","Type":"ContainerStarted","Data":"3b31307a098366c33941b4f14de0988f4597d0b8fae9ca47e91159e6bc0b1d80"} Feb 01 07:44:30 crc kubenswrapper[4650]: I0201 07:44:30.086211 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a","Type":"ContainerStarted","Data":"e7cef1bd0e215fd987359f06e993fdab2786cba9a285b5bcedf329f739e05054"} Feb 01 07:44:30 crc kubenswrapper[4650]: I0201 07:44:30.086221 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a","Type":"ContainerStarted","Data":"6b16e36c65efd3a9702222ac45fa3626468a2b2f2cc1d50aa8a5a00dd14fd6b1"} Feb 01 07:44:30 crc kubenswrapper[4650]: I0201 07:44:30.107671 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.107653649 podStartE2EDuration="2.107653649s" podCreationTimestamp="2026-02-01 07:44:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:44:30.104711232 +0000 UTC m=+1268.827809477" watchObservedRunningTime="2026-02-01 07:44:30.107653649 +0000 UTC m=+1268.830751894" Feb 01 07:44:30 crc kubenswrapper[4650]: I0201 07:44:30.442411 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:44:30 crc kubenswrapper[4650]: I0201 07:44:30.442645 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="ceilometer-central-agent" containerID="cri-o://5f3e9cbda5a45d6320c870b27f0563d68c90cb100f2f10c214a82f0efa2e3c4a" gracePeriod=30 Feb 01 07:44:30 crc kubenswrapper[4650]: I0201 07:44:30.443015 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="proxy-httpd" containerID="cri-o://acb2326861b642936430b58f6eb55495b8a5f2fde3cf157c7e1129a460b267fc" gracePeriod=30 Feb 01 07:44:30 crc kubenswrapper[4650]: I0201 07:44:30.443434 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="sg-core" 
containerID="cri-o://233e62c466d08d7aca551e5781f4b5f18d77d820f939af334da749f0f4034456" gracePeriod=30 Feb 01 07:44:30 crc kubenswrapper[4650]: I0201 07:44:30.443468 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="ceilometer-notification-agent" containerID="cri-o://e370b0f0d90b7331ac30094df1ab161531b300ad6e6711d1f08d6d242b5cfb41" gracePeriod=30 Feb 01 07:44:30 crc kubenswrapper[4650]: I0201 07:44:30.808675 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:44:31 crc kubenswrapper[4650]: I0201 07:44:31.097422 4650 generic.go:334] "Generic (PLEG): container finished" podID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerID="acb2326861b642936430b58f6eb55495b8a5f2fde3cf157c7e1129a460b267fc" exitCode=0 Feb 01 07:44:31 crc kubenswrapper[4650]: I0201 07:44:31.097451 4650 generic.go:334] "Generic (PLEG): container finished" podID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerID="233e62c466d08d7aca551e5781f4b5f18d77d820f939af334da749f0f4034456" exitCode=2 Feb 01 07:44:31 crc kubenswrapper[4650]: I0201 07:44:31.097458 4650 generic.go:334] "Generic (PLEG): container finished" podID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerID="5f3e9cbda5a45d6320c870b27f0563d68c90cb100f2f10c214a82f0efa2e3c4a" exitCode=0 Feb 01 07:44:31 crc kubenswrapper[4650]: I0201 07:44:31.097543 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ab92c14-2c1a-4176-b50f-61cf7eba5262","Type":"ContainerDied","Data":"acb2326861b642936430b58f6eb55495b8a5f2fde3cf157c7e1129a460b267fc"} Feb 01 07:44:31 crc kubenswrapper[4650]: I0201 07:44:31.097602 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ab92c14-2c1a-4176-b50f-61cf7eba5262","Type":"ContainerDied","Data":"233e62c466d08d7aca551e5781f4b5f18d77d820f939af334da749f0f4034456"} Feb 01 07:44:31 crc kubenswrapper[4650]: I0201 07:44:31.097624 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ab92c14-2c1a-4176-b50f-61cf7eba5262","Type":"ContainerDied","Data":"5f3e9cbda5a45d6320c870b27f0563d68c90cb100f2f10c214a82f0efa2e3c4a"} Feb 01 07:44:31 crc kubenswrapper[4650]: I0201 07:44:31.099230 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1c77c7ff-72e7-4635-b2b8-2e523265c4ff","Type":"ContainerStarted","Data":"f7408bdcf66d6f9e2f40b46427d80f1275107bcd6d6a5e27ce3d013d8ec17f42"} Feb 01 07:44:31 crc kubenswrapper[4650]: I0201 07:44:31.099259 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1c77c7ff-72e7-4635-b2b8-2e523265c4ff","Type":"ContainerStarted","Data":"5b0a2584acd7a294c51a7141bc25f9e77dd5a17a0e4babd2a243c80ce26b533a"} Feb 01 07:44:31 crc kubenswrapper[4650]: I0201 07:44:31.118099 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.745769211 podStartE2EDuration="2.118083691s" podCreationTimestamp="2026-02-01 07:44:29 +0000 UTC" firstStartedPulling="2026-02-01 07:44:30.068255968 +0000 UTC m=+1268.791354213" lastFinishedPulling="2026-02-01 07:44:30.440570448 +0000 UTC m=+1269.163668693" observedRunningTime="2026-02-01 07:44:31.117198738 +0000 UTC 
m=+1269.840296993" watchObservedRunningTime="2026-02-01 07:44:31.118083691 +0000 UTC m=+1269.841181946" Feb 01 07:44:31 crc kubenswrapper[4650]: E0201 07:44:31.193048 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 07:44:31 crc kubenswrapper[4650]: I0201 07:44:31.588983 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 01 07:44:31 crc kubenswrapper[4650]: I0201 07:44:31.589138 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 01 07:44:31 crc kubenswrapper[4650]: I0201 07:44:31.867624 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.039345 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-sg-core-conf-yaml\") pod \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.039404 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4lqq\" (UniqueName: \"kubernetes.io/projected/2ab92c14-2c1a-4176-b50f-61cf7eba5262-kube-api-access-j4lqq\") pod \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.039473 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-scripts\") pod \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.039521 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ab92c14-2c1a-4176-b50f-61cf7eba5262-run-httpd\") pod \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.039540 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-combined-ca-bundle\") pod \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.039558 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-config-data\") pod \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.039604 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ab92c14-2c1a-4176-b50f-61cf7eba5262-log-httpd\") pod \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\" (UID: \"2ab92c14-2c1a-4176-b50f-61cf7eba5262\") " Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.040431 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/2ab92c14-2c1a-4176-b50f-61cf7eba5262-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2ab92c14-2c1a-4176-b50f-61cf7eba5262" (UID: "2ab92c14-2c1a-4176-b50f-61cf7eba5262"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.041399 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ab92c14-2c1a-4176-b50f-61cf7eba5262-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2ab92c14-2c1a-4176-b50f-61cf7eba5262" (UID: "2ab92c14-2c1a-4176-b50f-61cf7eba5262"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.045613 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-scripts" (OuterVolumeSpecName: "scripts") pod "2ab92c14-2c1a-4176-b50f-61cf7eba5262" (UID: "2ab92c14-2c1a-4176-b50f-61cf7eba5262"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.047511 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ab92c14-2c1a-4176-b50f-61cf7eba5262-kube-api-access-j4lqq" (OuterVolumeSpecName: "kube-api-access-j4lqq") pod "2ab92c14-2c1a-4176-b50f-61cf7eba5262" (UID: "2ab92c14-2c1a-4176-b50f-61cf7eba5262"). InnerVolumeSpecName "kube-api-access-j4lqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.066387 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2ab92c14-2c1a-4176-b50f-61cf7eba5262" (UID: "2ab92c14-2c1a-4176-b50f-61cf7eba5262"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.122486 4650 generic.go:334] "Generic (PLEG): container finished" podID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerID="e370b0f0d90b7331ac30094df1ab161531b300ad6e6711d1f08d6d242b5cfb41" exitCode=0 Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.123280 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.123699 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ab92c14-2c1a-4176-b50f-61cf7eba5262","Type":"ContainerDied","Data":"e370b0f0d90b7331ac30094df1ab161531b300ad6e6711d1f08d6d242b5cfb41"} Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.123724 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ab92c14-2c1a-4176-b50f-61cf7eba5262","Type":"ContainerDied","Data":"429301d6eee109928a983d067c8e04b7480be48c56ed4fc217df66ef789f5ed1"} Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.123737 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.123765 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.124005 4650 scope.go:117] "RemoveContainer" containerID="acb2326861b642936430b58f6eb55495b8a5f2fde3cf157c7e1129a460b267fc" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.141853 4650 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.141877 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4lqq\" (UniqueName: \"kubernetes.io/projected/2ab92c14-2c1a-4176-b50f-61cf7eba5262-kube-api-access-j4lqq\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.141888 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.141896 4650 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ab92c14-2c1a-4176-b50f-61cf7eba5262-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.141913 4650 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ab92c14-2c1a-4176-b50f-61cf7eba5262-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.157673 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2ab92c14-2c1a-4176-b50f-61cf7eba5262" (UID: "2ab92c14-2c1a-4176-b50f-61cf7eba5262"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.167545 4650 scope.go:117] "RemoveContainer" containerID="233e62c466d08d7aca551e5781f4b5f18d77d820f939af334da749f0f4034456" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.185887 4650 scope.go:117] "RemoveContainer" containerID="e370b0f0d90b7331ac30094df1ab161531b300ad6e6711d1f08d6d242b5cfb41" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.188412 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-config-data" (OuterVolumeSpecName: "config-data") pod "2ab92c14-2c1a-4176-b50f-61cf7eba5262" (UID: "2ab92c14-2c1a-4176-b50f-61cf7eba5262"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.209285 4650 scope.go:117] "RemoveContainer" containerID="5f3e9cbda5a45d6320c870b27f0563d68c90cb100f2f10c214a82f0efa2e3c4a" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.228040 4650 scope.go:117] "RemoveContainer" containerID="acb2326861b642936430b58f6eb55495b8a5f2fde3cf157c7e1129a460b267fc" Feb 01 07:44:32 crc kubenswrapper[4650]: E0201 07:44:32.228482 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acb2326861b642936430b58f6eb55495b8a5f2fde3cf157c7e1129a460b267fc\": container with ID starting with acb2326861b642936430b58f6eb55495b8a5f2fde3cf157c7e1129a460b267fc not found: ID does not exist" containerID="acb2326861b642936430b58f6eb55495b8a5f2fde3cf157c7e1129a460b267fc" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.228536 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acb2326861b642936430b58f6eb55495b8a5f2fde3cf157c7e1129a460b267fc"} err="failed to get container status \"acb2326861b642936430b58f6eb55495b8a5f2fde3cf157c7e1129a460b267fc\": rpc error: code = NotFound desc = could not find container \"acb2326861b642936430b58f6eb55495b8a5f2fde3cf157c7e1129a460b267fc\": container with ID starting with acb2326861b642936430b58f6eb55495b8a5f2fde3cf157c7e1129a460b267fc not found: ID does not exist" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.228565 4650 scope.go:117] "RemoveContainer" containerID="233e62c466d08d7aca551e5781f4b5f18d77d820f939af334da749f0f4034456" Feb 01 07:44:32 crc kubenswrapper[4650]: E0201 07:44:32.228906 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"233e62c466d08d7aca551e5781f4b5f18d77d820f939af334da749f0f4034456\": container with ID starting with 233e62c466d08d7aca551e5781f4b5f18d77d820f939af334da749f0f4034456 not found: ID does not exist" containerID="233e62c466d08d7aca551e5781f4b5f18d77d820f939af334da749f0f4034456" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.228923 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"233e62c466d08d7aca551e5781f4b5f18d77d820f939af334da749f0f4034456"} err="failed to get container status \"233e62c466d08d7aca551e5781f4b5f18d77d820f939af334da749f0f4034456\": rpc error: code = NotFound desc = could not find container \"233e62c466d08d7aca551e5781f4b5f18d77d820f939af334da749f0f4034456\": container with ID starting with 233e62c466d08d7aca551e5781f4b5f18d77d820f939af334da749f0f4034456 not found: ID does not exist" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.228937 4650 scope.go:117] "RemoveContainer" containerID="e370b0f0d90b7331ac30094df1ab161531b300ad6e6711d1f08d6d242b5cfb41" Feb 01 07:44:32 crc kubenswrapper[4650]: E0201 07:44:32.229176 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e370b0f0d90b7331ac30094df1ab161531b300ad6e6711d1f08d6d242b5cfb41\": container with ID starting with e370b0f0d90b7331ac30094df1ab161531b300ad6e6711d1f08d6d242b5cfb41 not found: ID does not exist" containerID="e370b0f0d90b7331ac30094df1ab161531b300ad6e6711d1f08d6d242b5cfb41" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.229210 4650 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"e370b0f0d90b7331ac30094df1ab161531b300ad6e6711d1f08d6d242b5cfb41"} err="failed to get container status \"e370b0f0d90b7331ac30094df1ab161531b300ad6e6711d1f08d6d242b5cfb41\": rpc error: code = NotFound desc = could not find container \"e370b0f0d90b7331ac30094df1ab161531b300ad6e6711d1f08d6d242b5cfb41\": container with ID starting with e370b0f0d90b7331ac30094df1ab161531b300ad6e6711d1f08d6d242b5cfb41 not found: ID does not exist" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.229221 4650 scope.go:117] "RemoveContainer" containerID="5f3e9cbda5a45d6320c870b27f0563d68c90cb100f2f10c214a82f0efa2e3c4a" Feb 01 07:44:32 crc kubenswrapper[4650]: E0201 07:44:32.229442 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f3e9cbda5a45d6320c870b27f0563d68c90cb100f2f10c214a82f0efa2e3c4a\": container with ID starting with 5f3e9cbda5a45d6320c870b27f0563d68c90cb100f2f10c214a82f0efa2e3c4a not found: ID does not exist" containerID="5f3e9cbda5a45d6320c870b27f0563d68c90cb100f2f10c214a82f0efa2e3c4a" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.229461 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f3e9cbda5a45d6320c870b27f0563d68c90cb100f2f10c214a82f0efa2e3c4a"} err="failed to get container status \"5f3e9cbda5a45d6320c870b27f0563d68c90cb100f2f10c214a82f0efa2e3c4a\": rpc error: code = NotFound desc = could not find container \"5f3e9cbda5a45d6320c870b27f0563d68c90cb100f2f10c214a82f0efa2e3c4a\": container with ID starting with 5f3e9cbda5a45d6320c870b27f0563d68c90cb100f2f10c214a82f0efa2e3c4a not found: ID does not exist" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.243474 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.243503 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ab92c14-2c1a-4176-b50f-61cf7eba5262-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.358631 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.434722 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.451886 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.463295 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.476359 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:44:32 crc kubenswrapper[4650]: E0201 07:44:32.476937 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="sg-core" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.477000 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="sg-core" Feb 01 07:44:32 crc kubenswrapper[4650]: E0201 07:44:32.477088 4650 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="proxy-httpd" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.477147 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="proxy-httpd" Feb 01 07:44:32 crc kubenswrapper[4650]: E0201 07:44:32.477208 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="ceilometer-central-agent" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.477306 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="ceilometer-central-agent" Feb 01 07:44:32 crc kubenswrapper[4650]: E0201 07:44:32.477391 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="ceilometer-notification-agent" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.477439 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="ceilometer-notification-agent" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.477721 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="proxy-httpd" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.477790 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="ceilometer-central-agent" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.477846 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="sg-core" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.477913 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" containerName="ceilometer-notification-agent" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.479506 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.485536 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.485683 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.485537 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.494115 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.599185 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="56a63441-07a6-4b3c-bee6-ccc803825470" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.599198 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="56a63441-07a6-4b3c-bee6-ccc803825470" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.649178 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.649233 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.649252 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4bp4\" (UniqueName: \"kubernetes.io/projected/b54bd805-2e08-4935-a7c6-8eff819c8011-kube-api-access-d4bp4\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.649435 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.649579 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b54bd805-2e08-4935-a7c6-8eff819c8011-log-httpd\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.649873 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-scripts\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.651053 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b54bd805-2e08-4935-a7c6-8eff819c8011-run-httpd\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.651154 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-config-data\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.752594 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-scripts\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.752643 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b54bd805-2e08-4935-a7c6-8eff819c8011-run-httpd\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.752729 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-config-data\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.752812 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.752836 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.752856 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4bp4\" (UniqueName: \"kubernetes.io/projected/b54bd805-2e08-4935-a7c6-8eff819c8011-kube-api-access-d4bp4\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.752886 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.753763 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b54bd805-2e08-4935-a7c6-8eff819c8011-log-httpd\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.754411 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b54bd805-2e08-4935-a7c6-8eff819c8011-log-httpd\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.754753 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b54bd805-2e08-4935-a7c6-8eff819c8011-run-httpd\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.759738 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-scripts\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.760419 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.762363 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.762958 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-config-data\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.784508 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.791789 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4bp4\" (UniqueName: \"kubernetes.io/projected/b54bd805-2e08-4935-a7c6-8eff819c8011-kube-api-access-d4bp4\") pod \"ceilometer-0\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " pod="openstack/ceilometer-0" Feb 01 07:44:32 crc kubenswrapper[4650]: I0201 07:44:32.806872 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:44:33 crc kubenswrapper[4650]: I0201 07:44:33.385181 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:44:33 crc kubenswrapper[4650]: I0201 07:44:33.806051 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:44:33 crc kubenswrapper[4650]: I0201 07:44:33.806142 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:44:33 crc kubenswrapper[4650]: I0201 07:44:33.807253 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"63e7260b59a92b226b6ab4aa787fbd82123fc98b8f89bf546bd77d0c3883551e"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 07:44:33 crc kubenswrapper[4650]: I0201 07:44:33.807282 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:44:33 crc kubenswrapper[4650]: I0201 07:44:33.807316 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://63e7260b59a92b226b6ab4aa787fbd82123fc98b8f89bf546bd77d0c3883551e" gracePeriod=30 Feb 01 07:44:33 crc kubenswrapper[4650]: I0201 07:44:33.815627 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.173:8080/healthcheck\": EOF" Feb 01 07:44:33 crc kubenswrapper[4650]: I0201 07:44:33.974968 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ab92c14-2c1a-4176-b50f-61cf7eba5262" path="/var/lib/kubelet/pods/2ab92c14-2c1a-4176-b50f-61cf7eba5262/volumes" Feb 01 07:44:34 crc kubenswrapper[4650]: E0201 07:44:34.033432 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:44:34 crc kubenswrapper[4650]: I0201 07:44:34.142147 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="63e7260b59a92b226b6ab4aa787fbd82123fc98b8f89bf546bd77d0c3883551e" exitCode=0 Feb 01 07:44:34 crc kubenswrapper[4650]: I0201 07:44:34.142223 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"63e7260b59a92b226b6ab4aa787fbd82123fc98b8f89bf546bd77d0c3883551e"} Feb 01 07:44:34 crc kubenswrapper[4650]: I0201 07:44:34.142262 4650 scope.go:117] "RemoveContainer" 
containerID="3253537655fa707d10b86cd13bca8974885a06833d526a0a78a516111a7bdcc2" Feb 01 07:44:34 crc kubenswrapper[4650]: I0201 07:44:34.142979 4650 scope.go:117] "RemoveContainer" containerID="63e7260b59a92b226b6ab4aa787fbd82123fc98b8f89bf546bd77d0c3883551e" Feb 01 07:44:34 crc kubenswrapper[4650]: I0201 07:44:34.143034 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:44:34 crc kubenswrapper[4650]: E0201 07:44:34.143302 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:44:34 crc kubenswrapper[4650]: I0201 07:44:34.148815 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b54bd805-2e08-4935-a7c6-8eff819c8011","Type":"ContainerStarted","Data":"f7a98535e90f4eeac11e781183b0c65a161d8da70c7d2fe0b701a979c4661877"} Feb 01 07:44:34 crc kubenswrapper[4650]: I0201 07:44:34.592532 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:44:34 crc kubenswrapper[4650]: E0201 07:44:34.592676 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:44:34 crc kubenswrapper[4650]: E0201 07:44:34.592748 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:46:36.592728894 +0000 UTC m=+1395.315827129 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:44:35 crc kubenswrapper[4650]: I0201 07:44:35.161062 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b54bd805-2e08-4935-a7c6-8eff819c8011","Type":"ContainerStarted","Data":"f8c7c0b77c653f0ac7e15793d95adf7df6cfd203360816957f064a380578380d"} Feb 01 07:44:35 crc kubenswrapper[4650]: I0201 07:44:35.161357 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b54bd805-2e08-4935-a7c6-8eff819c8011","Type":"ContainerStarted","Data":"f647e2ae1fbd703d52b971748185a6dcaf8f04259d1bcdcf863f138d4639ab61"} Feb 01 07:44:35 crc kubenswrapper[4650]: I0201 07:44:35.965530 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:44:35 crc kubenswrapper[4650]: I0201 07:44:35.965886 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:44:35 crc kubenswrapper[4650]: I0201 07:44:35.965969 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:44:35 crc kubenswrapper[4650]: E0201 07:44:35.966316 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:44:36 crc kubenswrapper[4650]: I0201 07:44:36.170829 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b54bd805-2e08-4935-a7c6-8eff819c8011","Type":"ContainerStarted","Data":"1481373aa5477be8cf1010a34e1c9471fef5cd26a3e00bfa03665216c075a465"} Feb 01 07:44:37 crc kubenswrapper[4650]: I0201 07:44:37.161118 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:44:37 crc kubenswrapper[4650]: I0201 07:44:37.161469 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:44:37 crc kubenswrapper[4650]: I0201 07:44:37.434796 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Feb 01 07:44:37 crc kubenswrapper[4650]: I0201 07:44:37.466388 4650 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Feb 01 07:44:38 crc kubenswrapper[4650]: I0201 07:44:38.190716 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b54bd805-2e08-4935-a7c6-8eff819c8011","Type":"ContainerStarted","Data":"26062ba38797c37888d0b3f38b65f93305db1e4b69981d761b8a7ada277d1e11"} Feb 01 07:44:38 crc kubenswrapper[4650]: I0201 07:44:38.190885 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 01 07:44:38 crc kubenswrapper[4650]: I0201 07:44:38.225053 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.432559375 podStartE2EDuration="6.225019142s" podCreationTimestamp="2026-02-01 07:44:32 +0000 UTC" firstStartedPulling="2026-02-01 07:44:33.389840548 +0000 UTC m=+1272.112938793" lastFinishedPulling="2026-02-01 07:44:37.182300305 +0000 UTC m=+1275.905398560" observedRunningTime="2026-02-01 07:44:38.220827202 +0000 UTC m=+1276.943925507" watchObservedRunningTime="2026-02-01 07:44:38.225019142 +0000 UTC m=+1276.948117387" Feb 01 07:44:38 crc kubenswrapper[4650]: I0201 07:44:38.244854 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Feb 01 07:44:38 crc kubenswrapper[4650]: I0201 07:44:38.814447 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 01 07:44:38 crc kubenswrapper[4650]: I0201 07:44:38.814493 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 01 07:44:39 crc kubenswrapper[4650]: I0201 07:44:39.449545 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Feb 01 07:44:39 crc kubenswrapper[4650]: I0201 07:44:39.897408 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.201:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:44:39 crc kubenswrapper[4650]: I0201 07:44:39.897244 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.201:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:44:41 crc kubenswrapper[4650]: I0201 07:44:41.595373 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 01 07:44:41 crc kubenswrapper[4650]: I0201 07:44:41.599884 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 01 07:44:41 crc kubenswrapper[4650]: I0201 07:44:41.602488 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 01 07:44:42 crc kubenswrapper[4650]: I0201 07:44:42.234127 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.248899 4650 generic.go:334] "Generic (PLEG): container finished" podID="6876de01-6095-4fe4-a799-37444b455a82" containerID="2da597a32f0a46d324d9340fc57a17fe507ac7fa3133ce14d350b1ad1709332b" exitCode=137 Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 
07:44:44.249021 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"6876de01-6095-4fe4-a799-37444b455a82","Type":"ContainerDied","Data":"2da597a32f0a46d324d9340fc57a17fe507ac7fa3133ce14d350b1ad1709332b"} Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.250626 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"6876de01-6095-4fe4-a799-37444b455a82","Type":"ContainerDied","Data":"70d766ce5870942f7aa18d497e486ac891f00574c4b45cfa3c739dbdec2650bd"} Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.250652 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="70d766ce5870942f7aa18d497e486ac891f00574c4b45cfa3c739dbdec2650bd" Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.301355 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.487654 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqjsj\" (UniqueName: \"kubernetes.io/projected/6876de01-6095-4fe4-a799-37444b455a82-kube-api-access-cqjsj\") pod \"6876de01-6095-4fe4-a799-37444b455a82\" (UID: \"6876de01-6095-4fe4-a799-37444b455a82\") " Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.487725 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6876de01-6095-4fe4-a799-37444b455a82-config-data\") pod \"6876de01-6095-4fe4-a799-37444b455a82\" (UID: \"6876de01-6095-4fe4-a799-37444b455a82\") " Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.487844 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6876de01-6095-4fe4-a799-37444b455a82-combined-ca-bundle\") pod \"6876de01-6095-4fe4-a799-37444b455a82\" (UID: \"6876de01-6095-4fe4-a799-37444b455a82\") " Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.493068 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6876de01-6095-4fe4-a799-37444b455a82-kube-api-access-cqjsj" (OuterVolumeSpecName: "kube-api-access-cqjsj") pod "6876de01-6095-4fe4-a799-37444b455a82" (UID: "6876de01-6095-4fe4-a799-37444b455a82"). InnerVolumeSpecName "kube-api-access-cqjsj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.515064 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6876de01-6095-4fe4-a799-37444b455a82-config-data" (OuterVolumeSpecName: "config-data") pod "6876de01-6095-4fe4-a799-37444b455a82" (UID: "6876de01-6095-4fe4-a799-37444b455a82"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.519126 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6876de01-6095-4fe4-a799-37444b455a82-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6876de01-6095-4fe4-a799-37444b455a82" (UID: "6876de01-6095-4fe4-a799-37444b455a82"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.590155 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqjsj\" (UniqueName: \"kubernetes.io/projected/6876de01-6095-4fe4-a799-37444b455a82-kube-api-access-cqjsj\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.590748 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6876de01-6095-4fe4-a799-37444b455a82-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.590924 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6876de01-6095-4fe4-a799-37444b455a82-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.966124 4650 scope.go:117] "RemoveContainer" containerID="63e7260b59a92b226b6ab4aa787fbd82123fc98b8f89bf546bd77d0c3883551e" Feb 01 07:44:44 crc kubenswrapper[4650]: I0201 07:44:44.966155 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:44:44 crc kubenswrapper[4650]: E0201 07:44:44.967957 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.261479 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.317092 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.330336 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.356807 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 01 07:44:45 crc kubenswrapper[4650]: E0201 07:44:45.358712 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6876de01-6095-4fe4-a799-37444b455a82" containerName="nova-cell1-novncproxy-novncproxy" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.358743 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="6876de01-6095-4fe4-a799-37444b455a82" containerName="nova-cell1-novncproxy-novncproxy" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.358990 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="6876de01-6095-4fe4-a799-37444b455a82" containerName="nova-cell1-novncproxy-novncproxy" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.359972 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.364651 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.365068 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.365321 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.396947 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.419888 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/026358a4-5ccb-421a-b878-c0022296eaa1-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.419968 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/026358a4-5ccb-421a-b878-c0022296eaa1-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.420044 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/026358a4-5ccb-421a-b878-c0022296eaa1-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.420080 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/026358a4-5ccb-421a-b878-c0022296eaa1-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.420215 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htr6m\" (UniqueName: \"kubernetes.io/projected/026358a4-5ccb-421a-b878-c0022296eaa1-kube-api-access-htr6m\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.522972 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/026358a4-5ccb-421a-b878-c0022296eaa1-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.523080 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/026358a4-5ccb-421a-b878-c0022296eaa1-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " 
pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.523136 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/026358a4-5ccb-421a-b878-c0022296eaa1-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.523194 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/026358a4-5ccb-421a-b878-c0022296eaa1-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.523364 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htr6m\" (UniqueName: \"kubernetes.io/projected/026358a4-5ccb-421a-b878-c0022296eaa1-kube-api-access-htr6m\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.528357 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/026358a4-5ccb-421a-b878-c0022296eaa1-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.528638 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/026358a4-5ccb-421a-b878-c0022296eaa1-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.528731 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/026358a4-5ccb-421a-b878-c0022296eaa1-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.539805 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/026358a4-5ccb-421a-b878-c0022296eaa1-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.542514 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htr6m\" (UniqueName: \"kubernetes.io/projected/026358a4-5ccb-421a-b878-c0022296eaa1-kube-api-access-htr6m\") pod \"nova-cell1-novncproxy-0\" (UID: \"026358a4-5ccb-421a-b878-c0022296eaa1\") " pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.694812 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:45 crc kubenswrapper[4650]: I0201 07:44:45.979802 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6876de01-6095-4fe4-a799-37444b455a82" path="/var/lib/kubelet/pods/6876de01-6095-4fe4-a799-37444b455a82/volumes" Feb 01 07:44:46 crc kubenswrapper[4650]: I0201 07:44:46.253382 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 01 07:44:46 crc kubenswrapper[4650]: I0201 07:44:46.278932 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"026358a4-5ccb-421a-b878-c0022296eaa1","Type":"ContainerStarted","Data":"01b3bc278ce77310824b7ac7f54b7cb8b9f7721e57efe229e54a1007bf6f0c36"} Feb 01 07:44:47 crc kubenswrapper[4650]: I0201 07:44:47.292267 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"026358a4-5ccb-421a-b878-c0022296eaa1","Type":"ContainerStarted","Data":"315cd88ca188ba911941d429c27731e194b1907ecdc70aae65322ee7e8511f35"} Feb 01 07:44:47 crc kubenswrapper[4650]: I0201 07:44:47.320928 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.320907311 podStartE2EDuration="2.320907311s" podCreationTimestamp="2026-02-01 07:44:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:44:47.311048373 +0000 UTC m=+1286.034146658" watchObservedRunningTime="2026-02-01 07:44:47.320907311 +0000 UTC m=+1286.044005576" Feb 01 07:44:48 crc kubenswrapper[4650]: I0201 07:44:48.819318 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 01 07:44:48 crc kubenswrapper[4650]: I0201 07:44:48.820322 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 01 07:44:48 crc kubenswrapper[4650]: I0201 07:44:48.825003 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 01 07:44:48 crc kubenswrapper[4650]: I0201 07:44:48.825078 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.318404 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.324148 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.596630 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5459cb87c-blvph"] Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.599121 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.626065 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5459cb87c-blvph"] Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.718342 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8ksk\" (UniqueName: \"kubernetes.io/projected/a23bf06f-77cb-493c-8d42-f75156a56918-kube-api-access-v8ksk\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.718437 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a23bf06f-77cb-493c-8d42-f75156a56918-ovsdbserver-sb\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.718474 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a23bf06f-77cb-493c-8d42-f75156a56918-config\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.718495 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a23bf06f-77cb-493c-8d42-f75156a56918-dns-svc\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.718567 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a23bf06f-77cb-493c-8d42-f75156a56918-ovsdbserver-nb\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.819878 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a23bf06f-77cb-493c-8d42-f75156a56918-ovsdbserver-sb\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.819951 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a23bf06f-77cb-493c-8d42-f75156a56918-config\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.819978 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a23bf06f-77cb-493c-8d42-f75156a56918-dns-svc\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.820111 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/a23bf06f-77cb-493c-8d42-f75156a56918-ovsdbserver-nb\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.820159 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8ksk\" (UniqueName: \"kubernetes.io/projected/a23bf06f-77cb-493c-8d42-f75156a56918-kube-api-access-v8ksk\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.821134 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a23bf06f-77cb-493c-8d42-f75156a56918-dns-svc\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.821163 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a23bf06f-77cb-493c-8d42-f75156a56918-config\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.821649 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a23bf06f-77cb-493c-8d42-f75156a56918-ovsdbserver-sb\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.822208 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a23bf06f-77cb-493c-8d42-f75156a56918-ovsdbserver-nb\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.845839 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8ksk\" (UniqueName: \"kubernetes.io/projected/a23bf06f-77cb-493c-8d42-f75156a56918-kube-api-access-v8ksk\") pod \"dnsmasq-dns-5459cb87c-blvph\" (UID: \"a23bf06f-77cb-493c-8d42-f75156a56918\") " pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:49 crc kubenswrapper[4650]: I0201 07:44:49.935056 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:50 crc kubenswrapper[4650]: I0201 07:44:50.617798 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5459cb87c-blvph"] Feb 01 07:44:50 crc kubenswrapper[4650]: I0201 07:44:50.695879 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:50 crc kubenswrapper[4650]: I0201 07:44:50.965095 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:44:50 crc kubenswrapper[4650]: I0201 07:44:50.965157 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:44:50 crc kubenswrapper[4650]: I0201 07:44:50.965244 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:44:50 crc kubenswrapper[4650]: E0201 07:44:50.965517 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:44:51 crc kubenswrapper[4650]: I0201 07:44:51.334916 4650 generic.go:334] "Generic (PLEG): container finished" podID="a23bf06f-77cb-493c-8d42-f75156a56918" containerID="b714d88f98127196d2f3ba2579ba6628fe702ae8ac8522b6b53735a99b2b6af0" exitCode=0 Feb 01 07:44:51 crc kubenswrapper[4650]: I0201 07:44:51.335953 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5459cb87c-blvph" event={"ID":"a23bf06f-77cb-493c-8d42-f75156a56918","Type":"ContainerDied","Data":"b714d88f98127196d2f3ba2579ba6628fe702ae8ac8522b6b53735a99b2b6af0"} Feb 01 07:44:51 crc kubenswrapper[4650]: I0201 07:44:51.335980 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5459cb87c-blvph" event={"ID":"a23bf06f-77cb-493c-8d42-f75156a56918","Type":"ContainerStarted","Data":"d9c70ccba378f0e508ac742b4dc4930e3e47fed8666e44a8d1f7ce68505b46ea"} Feb 01 07:44:52 crc kubenswrapper[4650]: I0201 07:44:52.263507 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:52 crc kubenswrapper[4650]: I0201 07:44:52.345120 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5459cb87c-blvph" event={"ID":"a23bf06f-77cb-493c-8d42-f75156a56918","Type":"ContainerStarted","Data":"e9086f02b07d3bf7fa0de438c541913980bf199a707f64570ad8a4f2003da99c"} Feb 01 07:44:52 crc kubenswrapper[4650]: I0201 07:44:52.345439 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" containerName="nova-api-api" containerID="cri-o://3b31307a098366c33941b4f14de0988f4597d0b8fae9ca47e91159e6bc0b1d80" gracePeriod=30 Feb 01 07:44:52 crc kubenswrapper[4650]: I0201 07:44:52.345332 4650 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" containerName="nova-api-log" containerID="cri-o://e7cef1bd0e215fd987359f06e993fdab2786cba9a285b5bcedf329f739e05054" gracePeriod=30 Feb 01 07:44:52 crc kubenswrapper[4650]: I0201 07:44:52.385522 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:44:52 crc kubenswrapper[4650]: I0201 07:44:52.385785 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="ceilometer-central-agent" containerID="cri-o://f647e2ae1fbd703d52b971748185a6dcaf8f04259d1bcdcf863f138d4639ab61" gracePeriod=30 Feb 01 07:44:52 crc kubenswrapper[4650]: I0201 07:44:52.386204 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="proxy-httpd" containerID="cri-o://26062ba38797c37888d0b3f38b65f93305db1e4b69981d761b8a7ada277d1e11" gracePeriod=30 Feb 01 07:44:52 crc kubenswrapper[4650]: I0201 07:44:52.386248 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="sg-core" containerID="cri-o://1481373aa5477be8cf1010a34e1c9471fef5cd26a3e00bfa03665216c075a465" gracePeriod=30 Feb 01 07:44:52 crc kubenswrapper[4650]: I0201 07:44:52.386382 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="ceilometer-notification-agent" containerID="cri-o://f8c7c0b77c653f0ac7e15793d95adf7df6cfd203360816957f064a380578380d" gracePeriod=30 Feb 01 07:44:52 crc kubenswrapper[4650]: I0201 07:44:52.401157 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5459cb87c-blvph" podStartSLOduration=3.401141865 podStartE2EDuration="3.401141865s" podCreationTimestamp="2026-02-01 07:44:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:44:52.396267578 +0000 UTC m=+1291.119365823" watchObservedRunningTime="2026-02-01 07:44:52.401141865 +0000 UTC m=+1291.124240110" Feb 01 07:44:52 crc kubenswrapper[4650]: I0201 07:44:52.412389 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.203:3000/\": EOF" Feb 01 07:44:53 crc kubenswrapper[4650]: I0201 07:44:53.387188 4650 generic.go:334] "Generic (PLEG): container finished" podID="3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" containerID="e7cef1bd0e215fd987359f06e993fdab2786cba9a285b5bcedf329f739e05054" exitCode=143 Feb 01 07:44:53 crc kubenswrapper[4650]: I0201 07:44:53.387273 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a","Type":"ContainerDied","Data":"e7cef1bd0e215fd987359f06e993fdab2786cba9a285b5bcedf329f739e05054"} Feb 01 07:44:53 crc kubenswrapper[4650]: I0201 07:44:53.393138 4650 generic.go:334] "Generic (PLEG): container finished" podID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerID="26062ba38797c37888d0b3f38b65f93305db1e4b69981d761b8a7ada277d1e11" exitCode=0 Feb 01 07:44:53 crc kubenswrapper[4650]: I0201 07:44:53.393164 
4650 generic.go:334] "Generic (PLEG): container finished" podID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerID="1481373aa5477be8cf1010a34e1c9471fef5cd26a3e00bfa03665216c075a465" exitCode=2 Feb 01 07:44:53 crc kubenswrapper[4650]: I0201 07:44:53.393175 4650 generic.go:334] "Generic (PLEG): container finished" podID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerID="f647e2ae1fbd703d52b971748185a6dcaf8f04259d1bcdcf863f138d4639ab61" exitCode=0 Feb 01 07:44:53 crc kubenswrapper[4650]: I0201 07:44:53.393210 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b54bd805-2e08-4935-a7c6-8eff819c8011","Type":"ContainerDied","Data":"26062ba38797c37888d0b3f38b65f93305db1e4b69981d761b8a7ada277d1e11"} Feb 01 07:44:53 crc kubenswrapper[4650]: I0201 07:44:53.393255 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b54bd805-2e08-4935-a7c6-8eff819c8011","Type":"ContainerDied","Data":"1481373aa5477be8cf1010a34e1c9471fef5cd26a3e00bfa03665216c075a465"} Feb 01 07:44:53 crc kubenswrapper[4650]: I0201 07:44:53.393266 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b54bd805-2e08-4935-a7c6-8eff819c8011","Type":"ContainerDied","Data":"f647e2ae1fbd703d52b971748185a6dcaf8f04259d1bcdcf863f138d4639ab61"} Feb 01 07:44:53 crc kubenswrapper[4650]: I0201 07:44:53.393355 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:44:55 crc kubenswrapper[4650]: I0201 07:44:55.696111 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:55 crc kubenswrapper[4650]: I0201 07:44:55.738831 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.268927 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.276579 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.349957 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-config-data\") pod \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.350005 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-sg-core-conf-yaml\") pod \"b54bd805-2e08-4935-a7c6-8eff819c8011\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.350048 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-ceilometer-tls-certs\") pod \"b54bd805-2e08-4935-a7c6-8eff819c8011\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.350132 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-combined-ca-bundle\") pod \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.350150 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-config-data\") pod \"b54bd805-2e08-4935-a7c6-8eff819c8011\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.350172 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-combined-ca-bundle\") pod \"b54bd805-2e08-4935-a7c6-8eff819c8011\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.350243 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b54bd805-2e08-4935-a7c6-8eff819c8011-log-httpd\") pod \"b54bd805-2e08-4935-a7c6-8eff819c8011\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.350290 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b54bd805-2e08-4935-a7c6-8eff819c8011-run-httpd\") pod \"b54bd805-2e08-4935-a7c6-8eff819c8011\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.350321 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4bp4\" (UniqueName: \"kubernetes.io/projected/b54bd805-2e08-4935-a7c6-8eff819c8011-kube-api-access-d4bp4\") pod \"b54bd805-2e08-4935-a7c6-8eff819c8011\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.350350 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-logs\") pod \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\" (UID: 
\"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.350400 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xshxj\" (UniqueName: \"kubernetes.io/projected/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-kube-api-access-xshxj\") pod \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\" (UID: \"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a\") " Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.350414 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-scripts\") pod \"b54bd805-2e08-4935-a7c6-8eff819c8011\" (UID: \"b54bd805-2e08-4935-a7c6-8eff819c8011\") " Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.356797 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b54bd805-2e08-4935-a7c6-8eff819c8011-kube-api-access-d4bp4" (OuterVolumeSpecName: "kube-api-access-d4bp4") pod "b54bd805-2e08-4935-a7c6-8eff819c8011" (UID: "b54bd805-2e08-4935-a7c6-8eff819c8011"). InnerVolumeSpecName "kube-api-access-d4bp4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.357338 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b54bd805-2e08-4935-a7c6-8eff819c8011-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b54bd805-2e08-4935-a7c6-8eff819c8011" (UID: "b54bd805-2e08-4935-a7c6-8eff819c8011"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.357981 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b54bd805-2e08-4935-a7c6-8eff819c8011-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b54bd805-2e08-4935-a7c6-8eff819c8011" (UID: "b54bd805-2e08-4935-a7c6-8eff819c8011"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.359255 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-logs" (OuterVolumeSpecName: "logs") pod "3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" (UID: "3d4897b8-cc8b-40cd-a14c-6ae66f5f606a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.361432 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-scripts" (OuterVolumeSpecName: "scripts") pod "b54bd805-2e08-4935-a7c6-8eff819c8011" (UID: "b54bd805-2e08-4935-a7c6-8eff819c8011"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.368003 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-kube-api-access-xshxj" (OuterVolumeSpecName: "kube-api-access-xshxj") pod "3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" (UID: "3d4897b8-cc8b-40cd-a14c-6ae66f5f606a"). InnerVolumeSpecName "kube-api-access-xshxj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.455281 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4bp4\" (UniqueName: \"kubernetes.io/projected/b54bd805-2e08-4935-a7c6-8eff819c8011-kube-api-access-d4bp4\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.455306 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.455315 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xshxj\" (UniqueName: \"kubernetes.io/projected/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-kube-api-access-xshxj\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.455323 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.455332 4650 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b54bd805-2e08-4935-a7c6-8eff819c8011-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.455358 4650 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b54bd805-2e08-4935-a7c6-8eff819c8011-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.457749 4650 generic.go:334] "Generic (PLEG): container finished" podID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerID="f8c7c0b77c653f0ac7e15793d95adf7df6cfd203360816957f064a380578380d" exitCode=0 Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.457809 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b54bd805-2e08-4935-a7c6-8eff819c8011","Type":"ContainerDied","Data":"f8c7c0b77c653f0ac7e15793d95adf7df6cfd203360816957f064a380578380d"} Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.457835 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b54bd805-2e08-4935-a7c6-8eff819c8011","Type":"ContainerDied","Data":"f7a98535e90f4eeac11e781183b0c65a161d8da70c7d2fe0b701a979c4661877"} Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.457852 4650 scope.go:117] "RemoveContainer" containerID="26062ba38797c37888d0b3f38b65f93305db1e4b69981d761b8a7ada277d1e11" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.457964 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.471727 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-config-data" (OuterVolumeSpecName: "config-data") pod "3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" (UID: "3d4897b8-cc8b-40cd-a14c-6ae66f5f606a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.473297 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b54bd805-2e08-4935-a7c6-8eff819c8011" (UID: "b54bd805-2e08-4935-a7c6-8eff819c8011"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.480076 4650 generic.go:334] "Generic (PLEG): container finished" podID="3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" containerID="3b31307a098366c33941b4f14de0988f4597d0b8fae9ca47e91159e6bc0b1d80" exitCode=0 Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.480518 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.481279 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a","Type":"ContainerDied","Data":"3b31307a098366c33941b4f14de0988f4597d0b8fae9ca47e91159e6bc0b1d80"} Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.481306 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3d4897b8-cc8b-40cd-a14c-6ae66f5f606a","Type":"ContainerDied","Data":"6b16e36c65efd3a9702222ac45fa3626468a2b2f2cc1d50aa8a5a00dd14fd6b1"} Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.481552 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" (UID: "3d4897b8-cc8b-40cd-a14c-6ae66f5f606a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.499190 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "b54bd805-2e08-4935-a7c6-8eff819c8011" (UID: "b54bd805-2e08-4935-a7c6-8eff819c8011"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.508812 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.521535 4650 scope.go:117] "RemoveContainer" containerID="1481373aa5477be8cf1010a34e1c9471fef5cd26a3e00bfa03665216c075a465" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.557016 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.557053 4650 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.557062 4650 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.557070 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.567973 4650 scope.go:117] "RemoveContainer" containerID="f8c7c0b77c653f0ac7e15793d95adf7df6cfd203360816957f064a380578380d" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.569774 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b54bd805-2e08-4935-a7c6-8eff819c8011" (UID: "b54bd805-2e08-4935-a7c6-8eff819c8011"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.600524 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-config-data" (OuterVolumeSpecName: "config-data") pod "b54bd805-2e08-4935-a7c6-8eff819c8011" (UID: "b54bd805-2e08-4935-a7c6-8eff819c8011"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.601378 4650 scope.go:117] "RemoveContainer" containerID="f647e2ae1fbd703d52b971748185a6dcaf8f04259d1bcdcf863f138d4639ab61" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.630115 4650 scope.go:117] "RemoveContainer" containerID="26062ba38797c37888d0b3f38b65f93305db1e4b69981d761b8a7ada277d1e11" Feb 01 07:44:56 crc kubenswrapper[4650]: E0201 07:44:56.630583 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26062ba38797c37888d0b3f38b65f93305db1e4b69981d761b8a7ada277d1e11\": container with ID starting with 26062ba38797c37888d0b3f38b65f93305db1e4b69981d761b8a7ada277d1e11 not found: ID does not exist" containerID="26062ba38797c37888d0b3f38b65f93305db1e4b69981d761b8a7ada277d1e11" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.630616 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26062ba38797c37888d0b3f38b65f93305db1e4b69981d761b8a7ada277d1e11"} err="failed to get container status \"26062ba38797c37888d0b3f38b65f93305db1e4b69981d761b8a7ada277d1e11\": rpc error: code = NotFound desc = could not find container \"26062ba38797c37888d0b3f38b65f93305db1e4b69981d761b8a7ada277d1e11\": container with ID starting with 26062ba38797c37888d0b3f38b65f93305db1e4b69981d761b8a7ada277d1e11 not found: ID does not exist" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.630636 4650 scope.go:117] "RemoveContainer" containerID="1481373aa5477be8cf1010a34e1c9471fef5cd26a3e00bfa03665216c075a465" Feb 01 07:44:56 crc kubenswrapper[4650]: E0201 07:44:56.630901 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1481373aa5477be8cf1010a34e1c9471fef5cd26a3e00bfa03665216c075a465\": container with ID starting with 1481373aa5477be8cf1010a34e1c9471fef5cd26a3e00bfa03665216c075a465 not found: ID does not exist" containerID="1481373aa5477be8cf1010a34e1c9471fef5cd26a3e00bfa03665216c075a465" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.630920 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1481373aa5477be8cf1010a34e1c9471fef5cd26a3e00bfa03665216c075a465"} err="failed to get container status \"1481373aa5477be8cf1010a34e1c9471fef5cd26a3e00bfa03665216c075a465\": rpc error: code = NotFound desc = could not find container \"1481373aa5477be8cf1010a34e1c9471fef5cd26a3e00bfa03665216c075a465\": container with ID starting with 1481373aa5477be8cf1010a34e1c9471fef5cd26a3e00bfa03665216c075a465 not found: ID does not exist" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.630939 4650 scope.go:117] "RemoveContainer" containerID="f8c7c0b77c653f0ac7e15793d95adf7df6cfd203360816957f064a380578380d" Feb 01 07:44:56 crc kubenswrapper[4650]: E0201 07:44:56.631418 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8c7c0b77c653f0ac7e15793d95adf7df6cfd203360816957f064a380578380d\": container with ID starting with f8c7c0b77c653f0ac7e15793d95adf7df6cfd203360816957f064a380578380d not found: ID does not exist" containerID="f8c7c0b77c653f0ac7e15793d95adf7df6cfd203360816957f064a380578380d" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.631440 4650 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"f8c7c0b77c653f0ac7e15793d95adf7df6cfd203360816957f064a380578380d"} err="failed to get container status \"f8c7c0b77c653f0ac7e15793d95adf7df6cfd203360816957f064a380578380d\": rpc error: code = NotFound desc = could not find container \"f8c7c0b77c653f0ac7e15793d95adf7df6cfd203360816957f064a380578380d\": container with ID starting with f8c7c0b77c653f0ac7e15793d95adf7df6cfd203360816957f064a380578380d not found: ID does not exist" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.631453 4650 scope.go:117] "RemoveContainer" containerID="f647e2ae1fbd703d52b971748185a6dcaf8f04259d1bcdcf863f138d4639ab61" Feb 01 07:44:56 crc kubenswrapper[4650]: E0201 07:44:56.631683 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f647e2ae1fbd703d52b971748185a6dcaf8f04259d1bcdcf863f138d4639ab61\": container with ID starting with f647e2ae1fbd703d52b971748185a6dcaf8f04259d1bcdcf863f138d4639ab61 not found: ID does not exist" containerID="f647e2ae1fbd703d52b971748185a6dcaf8f04259d1bcdcf863f138d4639ab61" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.631702 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f647e2ae1fbd703d52b971748185a6dcaf8f04259d1bcdcf863f138d4639ab61"} err="failed to get container status \"f647e2ae1fbd703d52b971748185a6dcaf8f04259d1bcdcf863f138d4639ab61\": rpc error: code = NotFound desc = could not find container \"f647e2ae1fbd703d52b971748185a6dcaf8f04259d1bcdcf863f138d4639ab61\": container with ID starting with f647e2ae1fbd703d52b971748185a6dcaf8f04259d1bcdcf863f138d4639ab61 not found: ID does not exist" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.631714 4650 scope.go:117] "RemoveContainer" containerID="3b31307a098366c33941b4f14de0988f4597d0b8fae9ca47e91159e6bc0b1d80" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.653470 4650 scope.go:117] "RemoveContainer" containerID="e7cef1bd0e215fd987359f06e993fdab2786cba9a285b5bcedf329f739e05054" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.659718 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.659745 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b54bd805-2e08-4935-a7c6-8eff819c8011-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.677778 4650 scope.go:117] "RemoveContainer" containerID="3b31307a098366c33941b4f14de0988f4597d0b8fae9ca47e91159e6bc0b1d80" Feb 01 07:44:56 crc kubenswrapper[4650]: E0201 07:44:56.678216 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b31307a098366c33941b4f14de0988f4597d0b8fae9ca47e91159e6bc0b1d80\": container with ID starting with 3b31307a098366c33941b4f14de0988f4597d0b8fae9ca47e91159e6bc0b1d80 not found: ID does not exist" containerID="3b31307a098366c33941b4f14de0988f4597d0b8fae9ca47e91159e6bc0b1d80" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.678274 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b31307a098366c33941b4f14de0988f4597d0b8fae9ca47e91159e6bc0b1d80"} err="failed to get container status 
\"3b31307a098366c33941b4f14de0988f4597d0b8fae9ca47e91159e6bc0b1d80\": rpc error: code = NotFound desc = could not find container \"3b31307a098366c33941b4f14de0988f4597d0b8fae9ca47e91159e6bc0b1d80\": container with ID starting with 3b31307a098366c33941b4f14de0988f4597d0b8fae9ca47e91159e6bc0b1d80 not found: ID does not exist" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.678293 4650 scope.go:117] "RemoveContainer" containerID="e7cef1bd0e215fd987359f06e993fdab2786cba9a285b5bcedf329f739e05054" Feb 01 07:44:56 crc kubenswrapper[4650]: E0201 07:44:56.678637 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7cef1bd0e215fd987359f06e993fdab2786cba9a285b5bcedf329f739e05054\": container with ID starting with e7cef1bd0e215fd987359f06e993fdab2786cba9a285b5bcedf329f739e05054 not found: ID does not exist" containerID="e7cef1bd0e215fd987359f06e993fdab2786cba9a285b5bcedf329f739e05054" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.678660 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7cef1bd0e215fd987359f06e993fdab2786cba9a285b5bcedf329f739e05054"} err="failed to get container status \"e7cef1bd0e215fd987359f06e993fdab2786cba9a285b5bcedf329f739e05054\": rpc error: code = NotFound desc = could not find container \"e7cef1bd0e215fd987359f06e993fdab2786cba9a285b5bcedf329f739e05054\": container with ID starting with e7cef1bd0e215fd987359f06e993fdab2786cba9a285b5bcedf329f739e05054 not found: ID does not exist" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.755699 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-gts9x"] Feb 01 07:44:56 crc kubenswrapper[4650]: E0201 07:44:56.760443 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="ceilometer-central-agent" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.760564 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="ceilometer-central-agent" Feb 01 07:44:56 crc kubenswrapper[4650]: E0201 07:44:56.760656 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="sg-core" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.760707 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="sg-core" Feb 01 07:44:56 crc kubenswrapper[4650]: E0201 07:44:56.760763 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="ceilometer-notification-agent" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.760812 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="ceilometer-notification-agent" Feb 01 07:44:56 crc kubenswrapper[4650]: E0201 07:44:56.760869 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" containerName="nova-api-log" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.760928 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" containerName="nova-api-log" Feb 01 07:44:56 crc kubenswrapper[4650]: E0201 07:44:56.761003 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" containerName="nova-api-api" Feb 01 07:44:56 
crc kubenswrapper[4650]: I0201 07:44:56.761069 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" containerName="nova-api-api" Feb 01 07:44:56 crc kubenswrapper[4650]: E0201 07:44:56.761123 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="proxy-httpd" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.761170 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="proxy-httpd" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.761417 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="ceilometer-notification-agent" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.761481 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="ceilometer-central-agent" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.761544 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" containerName="nova-api-log" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.761603 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="sg-core" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.761661 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" containerName="proxy-httpd" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.761711 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" containerName="nova-api-api" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.762459 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.770277 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.771007 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.775661 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-gts9x"] Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.862562 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.864170 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ptkm\" (UniqueName: \"kubernetes.io/projected/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-kube-api-access-7ptkm\") pod \"nova-cell1-cell-mapping-gts9x\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.864267 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gts9x\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.864381 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-config-data\") pod \"nova-cell1-cell-mapping-gts9x\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.864419 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-scripts\") pod \"nova-cell1-cell-mapping-gts9x\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.883133 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.889457 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.966017 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gts9x\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.966390 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-config-data\") pod \"nova-cell1-cell-mapping-gts9x\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.966429 4650 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-scripts\") pod \"nova-cell1-cell-mapping-gts9x\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.966475 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ptkm\" (UniqueName: \"kubernetes.io/projected/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-kube-api-access-7ptkm\") pod \"nova-cell1-cell-mapping-gts9x\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.972767 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gts9x\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.989827 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.990563 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.991185 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-scripts\") pod \"nova-cell1-cell-mapping-gts9x\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.991588 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-config-data\") pod \"nova-cell1-cell-mapping-gts9x\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.996605 4650 scope.go:117] "RemoveContainer" containerID="63e7260b59a92b226b6ab4aa787fbd82123fc98b8f89bf546bd77d0c3883551e" Feb 01 07:44:56 crc kubenswrapper[4650]: I0201 07:44:56.996631 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:44:56 crc kubenswrapper[4650]: E0201 07:44:56.996875 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.014660 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.014947 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.014950 4650 
reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.049216 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.068895 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.068967 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rk2rf\" (UniqueName: \"kubernetes.io/projected/9d9859b9-fb15-4385-bcc3-b5d6044750dd-kube-api-access-rk2rf\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.069002 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.069103 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-config-data\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.069147 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-scripts\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.069173 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.069255 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d9859b9-fb15-4385-bcc3-b5d6044750dd-log-httpd\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.069286 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d9859b9-fb15-4385-bcc3-b5d6044750dd-run-httpd\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.072618 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ptkm\" (UniqueName: \"kubernetes.io/projected/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-kube-api-access-7ptkm\") pod \"nova-cell1-cell-mapping-gts9x\" (UID: 
\"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.091073 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.100462 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.125145 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.127081 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.140086 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.150601 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.150968 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.151556 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.170512 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d9859b9-fb15-4385-bcc3-b5d6044750dd-log-httpd\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.170564 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d9859b9-fb15-4385-bcc3-b5d6044750dd-run-httpd\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.170603 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.170644 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rk2rf\" (UniqueName: \"kubernetes.io/projected/9d9859b9-fb15-4385-bcc3-b5d6044750dd-kube-api-access-rk2rf\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.170673 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.170733 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-config-data\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 
07:44:57.170767 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-scripts\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.170786 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.171643 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d9859b9-fb15-4385-bcc3-b5d6044750dd-log-httpd\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.171904 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d9859b9-fb15-4385-bcc3-b5d6044750dd-run-httpd\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.176053 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-config-data\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.179537 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.179701 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-scripts\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.184699 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.195132 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d9859b9-fb15-4385-bcc3-b5d6044750dd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.217045 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rk2rf\" (UniqueName: \"kubernetes.io/projected/9d9859b9-fb15-4385-bcc3-b5d6044750dd-kube-api-access-rk2rf\") pod \"ceilometer-0\" (UID: \"9d9859b9-fb15-4385-bcc3-b5d6044750dd\") " pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.272008 4650 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-public-tls-certs\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.272106 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/520a4137-6134-409d-993e-899aa18fbd26-logs\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.272126 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.272144 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-config-data\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.272190 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-internal-tls-certs\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.272214 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvsqt\" (UniqueName: \"kubernetes.io/projected/520a4137-6134-409d-993e-899aa18fbd26-kube-api-access-dvsqt\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.374683 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-public-tls-certs\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.374749 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/520a4137-6134-409d-993e-899aa18fbd26-logs\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.374778 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.374794 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-config-data\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc 
kubenswrapper[4650]: I0201 07:44:57.374853 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-internal-tls-certs\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.374881 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvsqt\" (UniqueName: \"kubernetes.io/projected/520a4137-6134-409d-993e-899aa18fbd26-kube-api-access-dvsqt\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.379464 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/520a4137-6134-409d-993e-899aa18fbd26-logs\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.380237 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.380396 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-public-tls-certs\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.385090 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-internal-tls-certs\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.389906 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-config-data\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.403849 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvsqt\" (UniqueName: \"kubernetes.io/projected/520a4137-6134-409d-993e-899aa18fbd26-kube-api-access-dvsqt\") pod \"nova-api-0\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.513549 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.534176 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.747678 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-gts9x"] Feb 01 07:44:57 crc kubenswrapper[4650]: W0201 07:44:57.758506 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9feff00_76d8_4b55_a86c_5b5aabd5e7a6.slice/crio-828385ff670e35268b48f486b2255d44c1f505c65d2ad49a517e28bc293e4bc1 WatchSource:0}: Error finding container 828385ff670e35268b48f486b2255d44c1f505c65d2ad49a517e28bc293e4bc1: Status 404 returned error can't find the container with id 828385ff670e35268b48f486b2255d44c1f505c65d2ad49a517e28bc293e4bc1 Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.977372 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d4897b8-cc8b-40cd-a14c-6ae66f5f606a" path="/var/lib/kubelet/pods/3d4897b8-cc8b-40cd-a14c-6ae66f5f606a/volumes" Feb 01 07:44:57 crc kubenswrapper[4650]: I0201 07:44:57.978417 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b54bd805-2e08-4935-a7c6-8eff819c8011" path="/var/lib/kubelet/pods/b54bd805-2e08-4935-a7c6-8eff819c8011/volumes" Feb 01 07:44:58 crc kubenswrapper[4650]: I0201 07:44:58.002872 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 01 07:44:58 crc kubenswrapper[4650]: W0201 07:44:58.005344 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d9859b9_fb15_4385_bcc3_b5d6044750dd.slice/crio-a30cce87f35d5a75fdf75c8677849310c03c9492fbb6c4815ce2129f0f35ed43 WatchSource:0}: Error finding container a30cce87f35d5a75fdf75c8677849310c03c9492fbb6c4815ce2129f0f35ed43: Status 404 returned error can't find the container with id a30cce87f35d5a75fdf75c8677849310c03c9492fbb6c4815ce2129f0f35ed43 Feb 01 07:44:58 crc kubenswrapper[4650]: I0201 07:44:58.093361 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:44:58 crc kubenswrapper[4650]: I0201 07:44:58.552522 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gts9x" event={"ID":"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6","Type":"ContainerStarted","Data":"7ca780b1af15e2db895312a393c53206efc3db2dfc95bafaf856f097f3efbb93"} Feb 01 07:44:58 crc kubenswrapper[4650]: I0201 07:44:58.552804 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gts9x" event={"ID":"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6","Type":"ContainerStarted","Data":"828385ff670e35268b48f486b2255d44c1f505c65d2ad49a517e28bc293e4bc1"} Feb 01 07:44:58 crc kubenswrapper[4650]: I0201 07:44:58.562907 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d9859b9-fb15-4385-bcc3-b5d6044750dd","Type":"ContainerStarted","Data":"a30cce87f35d5a75fdf75c8677849310c03c9492fbb6c4815ce2129f0f35ed43"} Feb 01 07:44:58 crc kubenswrapper[4650]: I0201 07:44:58.565622 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"520a4137-6134-409d-993e-899aa18fbd26","Type":"ContainerStarted","Data":"2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63"} Feb 01 07:44:58 crc kubenswrapper[4650]: I0201 07:44:58.565664 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"520a4137-6134-409d-993e-899aa18fbd26","Type":"ContainerStarted","Data":"a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82"} Feb 01 07:44:58 crc kubenswrapper[4650]: I0201 07:44:58.565692 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"520a4137-6134-409d-993e-899aa18fbd26","Type":"ContainerStarted","Data":"2d81e6245e940e30a36c1414868a54b52d58fc4efce41302fab8aee3f3245501"} Feb 01 07:44:58 crc kubenswrapper[4650]: I0201 07:44:58.576404 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-gts9x" podStartSLOduration=2.576388904 podStartE2EDuration="2.576388904s" podCreationTimestamp="2026-02-01 07:44:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:44:58.570851159 +0000 UTC m=+1297.293949404" watchObservedRunningTime="2026-02-01 07:44:58.576388904 +0000 UTC m=+1297.299487149" Feb 01 07:44:58 crc kubenswrapper[4650]: I0201 07:44:58.605394 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.605376592 podStartE2EDuration="2.605376592s" podCreationTimestamp="2026-02-01 07:44:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:44:58.599279943 +0000 UTC m=+1297.322378188" watchObservedRunningTime="2026-02-01 07:44:58.605376592 +0000 UTC m=+1297.328474837" Feb 01 07:44:59 crc kubenswrapper[4650]: I0201 07:44:59.575703 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d9859b9-fb15-4385-bcc3-b5d6044750dd","Type":"ContainerStarted","Data":"95699d122e34dd71ee7e2028d7a4ffbff9cd5154886f7a3bf1b81f9f8998e640"} Feb 01 07:44:59 crc kubenswrapper[4650]: I0201 07:44:59.937165 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5459cb87c-blvph" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.011365 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c6ccb6797-25srx"] Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.011921 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" podUID="474466cd-43fb-4e2f-8d45-c782ece71569" containerName="dnsmasq-dns" containerID="cri-o://d20d45fcbd61e76a263e790789888efdf66c9aa7477c86b0bd53356884706e37" gracePeriod=10 Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.155882 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz"] Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.157367 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.160246 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.165943 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.200523 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz"] Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.356823 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-secret-volume\") pod \"collect-profiles-29498865-k2vfz\" (UID: \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.356911 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9k8mg\" (UniqueName: \"kubernetes.io/projected/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-kube-api-access-9k8mg\") pod \"collect-profiles-29498865-k2vfz\" (UID: \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.356967 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-config-volume\") pod \"collect-profiles-29498865-k2vfz\" (UID: \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.462936 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-config-volume\") pod \"collect-profiles-29498865-k2vfz\" (UID: \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.463577 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-secret-volume\") pod \"collect-profiles-29498865-k2vfz\" (UID: \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.463681 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9k8mg\" (UniqueName: \"kubernetes.io/projected/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-kube-api-access-9k8mg\") pod \"collect-profiles-29498865-k2vfz\" (UID: \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.464113 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-config-volume\") pod 
\"collect-profiles-29498865-k2vfz\" (UID: \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.470973 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-secret-volume\") pod \"collect-profiles-29498865-k2vfz\" (UID: \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.498251 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9k8mg\" (UniqueName: \"kubernetes.io/projected/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-kube-api-access-9k8mg\") pod \"collect-profiles-29498865-k2vfz\" (UID: \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.533050 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.609009 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d9859b9-fb15-4385-bcc3-b5d6044750dd","Type":"ContainerStarted","Data":"91121780e457e0d70a260e502b5edfc06fc6113df51f26dfdd38e247eaf5e910"} Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.609066 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d9859b9-fb15-4385-bcc3-b5d6044750dd","Type":"ContainerStarted","Data":"845bca26dc4ae586cca8d8a247863edf841aa56cd4e880a4d022fde41fcb81bb"} Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.610986 4650 generic.go:334] "Generic (PLEG): container finished" podID="474466cd-43fb-4e2f-8d45-c782ece71569" containerID="d20d45fcbd61e76a263e790789888efdf66c9aa7477c86b0bd53356884706e37" exitCode=0 Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.611657 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" event={"ID":"474466cd-43fb-4e2f-8d45-c782ece71569","Type":"ContainerDied","Data":"d20d45fcbd61e76a263e790789888efdf66c9aa7477c86b0bd53356884706e37"} Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.651541 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.772551 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmqr9\" (UniqueName: \"kubernetes.io/projected/474466cd-43fb-4e2f-8d45-c782ece71569-kube-api-access-vmqr9\") pod \"474466cd-43fb-4e2f-8d45-c782ece71569\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.772884 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-dns-svc\") pod \"474466cd-43fb-4e2f-8d45-c782ece71569\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.772963 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-ovsdbserver-sb\") pod \"474466cd-43fb-4e2f-8d45-c782ece71569\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.773048 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-config\") pod \"474466cd-43fb-4e2f-8d45-c782ece71569\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.773144 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-ovsdbserver-nb\") pod \"474466cd-43fb-4e2f-8d45-c782ece71569\" (UID: \"474466cd-43fb-4e2f-8d45-c782ece71569\") " Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.783152 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/474466cd-43fb-4e2f-8d45-c782ece71569-kube-api-access-vmqr9" (OuterVolumeSpecName: "kube-api-access-vmqr9") pod "474466cd-43fb-4e2f-8d45-c782ece71569" (UID: "474466cd-43fb-4e2f-8d45-c782ece71569"). InnerVolumeSpecName "kube-api-access-vmqr9". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.833821 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "474466cd-43fb-4e2f-8d45-c782ece71569" (UID: "474466cd-43fb-4e2f-8d45-c782ece71569"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.845937 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-config" (OuterVolumeSpecName: "config") pod "474466cd-43fb-4e2f-8d45-c782ece71569" (UID: "474466cd-43fb-4e2f-8d45-c782ece71569"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.860550 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "474466cd-43fb-4e2f-8d45-c782ece71569" (UID: "474466cd-43fb-4e2f-8d45-c782ece71569"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.876577 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmqr9\" (UniqueName: \"kubernetes.io/projected/474466cd-43fb-4e2f-8d45-c782ece71569-kube-api-access-vmqr9\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.876604 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.876613 4650 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-config\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.876621 4650 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.896060 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "474466cd-43fb-4e2f-8d45-c782ece71569" (UID: "474466cd-43fb-4e2f-8d45-c782ece71569"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:45:00 crc kubenswrapper[4650]: I0201 07:45:00.977836 4650 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/474466cd-43fb-4e2f-8d45-c782ece71569-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:01 crc kubenswrapper[4650]: I0201 07:45:01.185463 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz"] Feb 01 07:45:01 crc kubenswrapper[4650]: I0201 07:45:01.623123 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" event={"ID":"474466cd-43fb-4e2f-8d45-c782ece71569","Type":"ContainerDied","Data":"e652a96a124f57241cd4fa60d7612c4fb0d988d4d4dfa818cb0916f5526bcebe"} Feb 01 07:45:01 crc kubenswrapper[4650]: I0201 07:45:01.624428 4650 scope.go:117] "RemoveContainer" containerID="d20d45fcbd61e76a263e790789888efdf66c9aa7477c86b0bd53356884706e37" Feb 01 07:45:01 crc kubenswrapper[4650]: I0201 07:45:01.623166 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c6ccb6797-25srx" Feb 01 07:45:01 crc kubenswrapper[4650]: I0201 07:45:01.625365 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" event={"ID":"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169","Type":"ContainerStarted","Data":"80ab811f326fbd81151a9b34deb24e214fbc145656bb5f2bda7e8e95d8a2ad2c"} Feb 01 07:45:01 crc kubenswrapper[4650]: I0201 07:45:01.625461 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" event={"ID":"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169","Type":"ContainerStarted","Data":"c2211b1529d87128e6c655d883790698009c2ae984391a19efdb6f4e9672e9fb"} Feb 01 07:45:01 crc kubenswrapper[4650]: I0201 07:45:01.642774 4650 scope.go:117] "RemoveContainer" containerID="e9f82b81836da56347f6486bdc715b67ef24b6ab809038dc3b3d1ec85df65201" Feb 01 07:45:01 crc kubenswrapper[4650]: I0201 07:45:01.651804 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" podStartSLOduration=1.651786513 podStartE2EDuration="1.651786513s" podCreationTimestamp="2026-02-01 07:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:45:01.643980009 +0000 UTC m=+1300.367078254" watchObservedRunningTime="2026-02-01 07:45:01.651786513 +0000 UTC m=+1300.374884758" Feb 01 07:45:01 crc kubenswrapper[4650]: I0201 07:45:01.673129 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c6ccb6797-25srx"] Feb 01 07:45:01 crc kubenswrapper[4650]: I0201 07:45:01.688199 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c6ccb6797-25srx"] Feb 01 07:45:01 crc kubenswrapper[4650]: I0201 07:45:01.976089 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="474466cd-43fb-4e2f-8d45-c782ece71569" path="/var/lib/kubelet/pods/474466cd-43fb-4e2f-8d45-c782ece71569/volumes" Feb 01 07:45:02 crc kubenswrapper[4650]: I0201 07:45:02.637502 4650 generic.go:334] "Generic (PLEG): container finished" podID="fbd3e169-7dc4-49e7-b6f3-dd16b0e01169" containerID="80ab811f326fbd81151a9b34deb24e214fbc145656bb5f2bda7e8e95d8a2ad2c" exitCode=0 Feb 01 07:45:02 crc kubenswrapper[4650]: I0201 07:45:02.637574 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" event={"ID":"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169","Type":"ContainerDied","Data":"80ab811f326fbd81151a9b34deb24e214fbc145656bb5f2bda7e8e95d8a2ad2c"} Feb 01 07:45:03 crc kubenswrapper[4650]: I0201 07:45:03.651652 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d9859b9-fb15-4385-bcc3-b5d6044750dd","Type":"ContainerStarted","Data":"6a82f3cbe69d078572fbb9301d4a041a4b1a697f2e0150132709ad9fbc1959f9"} Feb 01 07:45:03 crc kubenswrapper[4650]: I0201 07:45:03.652258 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 01 07:45:03 crc kubenswrapper[4650]: I0201 07:45:03.699021 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.740590879 podStartE2EDuration="7.698993427s" podCreationTimestamp="2026-02-01 07:44:56 +0000 UTC" firstStartedPulling="2026-02-01 07:44:58.008686293 +0000 UTC m=+1296.731784578" 
lastFinishedPulling="2026-02-01 07:45:02.967088891 +0000 UTC m=+1301.690187126" observedRunningTime="2026-02-01 07:45:03.698073393 +0000 UTC m=+1302.421171638" watchObservedRunningTime="2026-02-01 07:45:03.698993427 +0000 UTC m=+1302.422091712" Feb 01 07:45:03 crc kubenswrapper[4650]: I0201 07:45:03.966187 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:45:03 crc kubenswrapper[4650]: I0201 07:45:03.966510 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:45:03 crc kubenswrapper[4650]: I0201 07:45:03.966620 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:45:03 crc kubenswrapper[4650]: E0201 07:45:03.966905 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.023122 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.135350 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-secret-volume\") pod \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\" (UID: \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\") " Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.135398 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-config-volume\") pod \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\" (UID: \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\") " Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.135465 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9k8mg\" (UniqueName: \"kubernetes.io/projected/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-kube-api-access-9k8mg\") pod \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\" (UID: \"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169\") " Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.136056 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-config-volume" (OuterVolumeSpecName: "config-volume") pod "fbd3e169-7dc4-49e7-b6f3-dd16b0e01169" (UID: "fbd3e169-7dc4-49e7-b6f3-dd16b0e01169"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.140958 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "fbd3e169-7dc4-49e7-b6f3-dd16b0e01169" (UID: "fbd3e169-7dc4-49e7-b6f3-dd16b0e01169"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.146834 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-kube-api-access-9k8mg" (OuterVolumeSpecName: "kube-api-access-9k8mg") pod "fbd3e169-7dc4-49e7-b6f3-dd16b0e01169" (UID: "fbd3e169-7dc4-49e7-b6f3-dd16b0e01169"). InnerVolumeSpecName "kube-api-access-9k8mg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.238269 4650 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.238304 4650 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-config-volume\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.238313 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9k8mg\" (UniqueName: \"kubernetes.io/projected/fbd3e169-7dc4-49e7-b6f3-dd16b0e01169-kube-api-access-9k8mg\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.668191 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.668190 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498865-k2vfz" event={"ID":"fbd3e169-7dc4-49e7-b6f3-dd16b0e01169","Type":"ContainerDied","Data":"c2211b1529d87128e6c655d883790698009c2ae984391a19efdb6f4e9672e9fb"} Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.668383 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2211b1529d87128e6c655d883790698009c2ae984391a19efdb6f4e9672e9fb" Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.671757 4650 generic.go:334] "Generic (PLEG): container finished" podID="c9feff00-76d8-4b55-a86c-5b5aabd5e7a6" containerID="7ca780b1af15e2db895312a393c53206efc3db2dfc95bafaf856f097f3efbb93" exitCode=0 Feb 01 07:45:04 crc kubenswrapper[4650]: I0201 07:45:04.671882 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gts9x" event={"ID":"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6","Type":"ContainerDied","Data":"7ca780b1af15e2db895312a393c53206efc3db2dfc95bafaf856f097f3efbb93"} Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.165086 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.288062 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ptkm\" (UniqueName: \"kubernetes.io/projected/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-kube-api-access-7ptkm\") pod \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.288192 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-combined-ca-bundle\") pod \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.288477 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-scripts\") pod \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.288526 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-config-data\") pod \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\" (UID: \"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6\") " Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.294832 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-scripts" (OuterVolumeSpecName: "scripts") pod "c9feff00-76d8-4b55-a86c-5b5aabd5e7a6" (UID: "c9feff00-76d8-4b55-a86c-5b5aabd5e7a6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.296425 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-kube-api-access-7ptkm" (OuterVolumeSpecName: "kube-api-access-7ptkm") pod "c9feff00-76d8-4b55-a86c-5b5aabd5e7a6" (UID: "c9feff00-76d8-4b55-a86c-5b5aabd5e7a6"). InnerVolumeSpecName "kube-api-access-7ptkm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.322289 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-config-data" (OuterVolumeSpecName: "config-data") pod "c9feff00-76d8-4b55-a86c-5b5aabd5e7a6" (UID: "c9feff00-76d8-4b55-a86c-5b5aabd5e7a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.323185 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c9feff00-76d8-4b55-a86c-5b5aabd5e7a6" (UID: "c9feff00-76d8-4b55-a86c-5b5aabd5e7a6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.390754 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.390785 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ptkm\" (UniqueName: \"kubernetes.io/projected/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-kube-api-access-7ptkm\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.390799 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.390811 4650 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6-scripts\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.696264 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gts9x" event={"ID":"c9feff00-76d8-4b55-a86c-5b5aabd5e7a6","Type":"ContainerDied","Data":"828385ff670e35268b48f486b2255d44c1f505c65d2ad49a517e28bc293e4bc1"} Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.696318 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="828385ff670e35268b48f486b2255d44c1f505c65d2ad49a517e28bc293e4bc1" Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.696336 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gts9x" Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.913404 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.913969 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="520a4137-6134-409d-993e-899aa18fbd26" containerName="nova-api-log" containerID="cri-o://a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82" gracePeriod=30 Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.914053 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="520a4137-6134-409d-993e-899aa18fbd26" containerName="nova-api-api" containerID="cri-o://2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63" gracePeriod=30 Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.926987 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.927249 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="6915bfe3-bba1-4976-a7c0-18129dae5c0c" containerName="nova-scheduler-scheduler" containerID="cri-o://719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de" gracePeriod=30 Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.990100 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.990341 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="56a63441-07a6-4b3c-bee6-ccc803825470" 
containerName="nova-metadata-log" containerID="cri-o://48fead3762663a5df81dacfcd7a376c3dc8691417335ae4a56681890a7faeecf" gracePeriod=30 Feb 01 07:45:06 crc kubenswrapper[4650]: I0201 07:45:06.990494 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="56a63441-07a6-4b3c-bee6-ccc803825470" containerName="nova-metadata-metadata" containerID="cri-o://4758c97cfe821a4579e3f979673ce7577fdd0c2a8bd533f026001a673d4b4a98" gracePeriod=30 Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.161692 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.161765 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.419458 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: E0201 07:45:07.437204 4650 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 01 07:45:07 crc kubenswrapper[4650]: E0201 07:45:07.448538 4650 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 01 07:45:07 crc kubenswrapper[4650]: E0201 07:45:07.450823 4650 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 01 07:45:07 crc kubenswrapper[4650]: E0201 07:45:07.450863 4650 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="6915bfe3-bba1-4976-a7c0-18129dae5c0c" containerName="nova-scheduler-scheduler" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.515115 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-public-tls-certs\") pod \"520a4137-6134-409d-993e-899aa18fbd26\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.515206 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvsqt\" (UniqueName: 
\"kubernetes.io/projected/520a4137-6134-409d-993e-899aa18fbd26-kube-api-access-dvsqt\") pod \"520a4137-6134-409d-993e-899aa18fbd26\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.515292 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-config-data\") pod \"520a4137-6134-409d-993e-899aa18fbd26\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.515331 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-internal-tls-certs\") pod \"520a4137-6134-409d-993e-899aa18fbd26\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.515358 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-combined-ca-bundle\") pod \"520a4137-6134-409d-993e-899aa18fbd26\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.515399 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/520a4137-6134-409d-993e-899aa18fbd26-logs\") pod \"520a4137-6134-409d-993e-899aa18fbd26\" (UID: \"520a4137-6134-409d-993e-899aa18fbd26\") " Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.516264 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/520a4137-6134-409d-993e-899aa18fbd26-logs" (OuterVolumeSpecName: "logs") pod "520a4137-6134-409d-993e-899aa18fbd26" (UID: "520a4137-6134-409d-993e-899aa18fbd26"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.536311 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/520a4137-6134-409d-993e-899aa18fbd26-kube-api-access-dvsqt" (OuterVolumeSpecName: "kube-api-access-dvsqt") pod "520a4137-6134-409d-993e-899aa18fbd26" (UID: "520a4137-6134-409d-993e-899aa18fbd26"). InnerVolumeSpecName "kube-api-access-dvsqt". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.539565 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "520a4137-6134-409d-993e-899aa18fbd26" (UID: "520a4137-6134-409d-993e-899aa18fbd26"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.551122 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-config-data" (OuterVolumeSpecName: "config-data") pod "520a4137-6134-409d-993e-899aa18fbd26" (UID: "520a4137-6134-409d-993e-899aa18fbd26"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.567387 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "520a4137-6134-409d-993e-899aa18fbd26" (UID: "520a4137-6134-409d-993e-899aa18fbd26"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.574052 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "520a4137-6134-409d-993e-899aa18fbd26" (UID: "520a4137-6134-409d-993e-899aa18fbd26"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.617833 4650 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.617864 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvsqt\" (UniqueName: \"kubernetes.io/projected/520a4137-6134-409d-993e-899aa18fbd26-kube-api-access-dvsqt\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.617874 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.617883 4650 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.617894 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/520a4137-6134-409d-993e-899aa18fbd26-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.617902 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/520a4137-6134-409d-993e-899aa18fbd26-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.705735 4650 generic.go:334] "Generic (PLEG): container finished" podID="56a63441-07a6-4b3c-bee6-ccc803825470" containerID="48fead3762663a5df81dacfcd7a376c3dc8691417335ae4a56681890a7faeecf" exitCode=143 Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.705802 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"56a63441-07a6-4b3c-bee6-ccc803825470","Type":"ContainerDied","Data":"48fead3762663a5df81dacfcd7a376c3dc8691417335ae4a56681890a7faeecf"} Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.707408 4650 generic.go:334] "Generic (PLEG): container finished" podID="520a4137-6134-409d-993e-899aa18fbd26" containerID="2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63" exitCode=0 Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.707427 4650 generic.go:334] "Generic (PLEG): container finished" podID="520a4137-6134-409d-993e-899aa18fbd26" 
containerID="a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82" exitCode=143 Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.707440 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"520a4137-6134-409d-993e-899aa18fbd26","Type":"ContainerDied","Data":"2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63"} Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.707455 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"520a4137-6134-409d-993e-899aa18fbd26","Type":"ContainerDied","Data":"a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82"} Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.707464 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"520a4137-6134-409d-993e-899aa18fbd26","Type":"ContainerDied","Data":"2d81e6245e940e30a36c1414868a54b52d58fc4efce41302fab8aee3f3245501"} Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.707479 4650 scope.go:117] "RemoveContainer" containerID="2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.707599 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.737116 4650 scope.go:117] "RemoveContainer" containerID="a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.743636 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.755694 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.756543 4650 scope.go:117] "RemoveContainer" containerID="2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63" Feb 01 07:45:07 crc kubenswrapper[4650]: E0201 07:45:07.757022 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63\": container with ID starting with 2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63 not found: ID does not exist" containerID="2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.757063 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63"} err="failed to get container status \"2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63\": rpc error: code = NotFound desc = could not find container \"2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63\": container with ID starting with 2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63 not found: ID does not exist" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.757090 4650 scope.go:117] "RemoveContainer" containerID="a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82" Feb 01 07:45:07 crc kubenswrapper[4650]: E0201 07:45:07.757502 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82\": container with ID starting with 
a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82 not found: ID does not exist" containerID="a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.757522 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82"} err="failed to get container status \"a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82\": rpc error: code = NotFound desc = could not find container \"a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82\": container with ID starting with a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82 not found: ID does not exist" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.757536 4650 scope.go:117] "RemoveContainer" containerID="2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.758740 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63"} err="failed to get container status \"2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63\": rpc error: code = NotFound desc = could not find container \"2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63\": container with ID starting with 2a4dee0524ee2079308a29d0b3ed00aa417ac63402ec7cb4cb8aacc2b34b9d63 not found: ID does not exist" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.758768 4650 scope.go:117] "RemoveContainer" containerID="a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.761552 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82"} err="failed to get container status \"a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82\": rpc error: code = NotFound desc = could not find container \"a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82\": container with ID starting with a5f621f760394a2030c4c3449e99b91f08d9269d70b8da76b214cdf61b0b4d82 not found: ID does not exist" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.769306 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 01 07:45:07 crc kubenswrapper[4650]: E0201 07:45:07.769665 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="474466cd-43fb-4e2f-8d45-c782ece71569" containerName="dnsmasq-dns" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.769682 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="474466cd-43fb-4e2f-8d45-c782ece71569" containerName="dnsmasq-dns" Feb 01 07:45:07 crc kubenswrapper[4650]: E0201 07:45:07.769700 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="520a4137-6134-409d-993e-899aa18fbd26" containerName="nova-api-api" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.769706 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="520a4137-6134-409d-993e-899aa18fbd26" containerName="nova-api-api" Feb 01 07:45:07 crc kubenswrapper[4650]: E0201 07:45:07.769722 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9feff00-76d8-4b55-a86c-5b5aabd5e7a6" containerName="nova-manage" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.769728 4650 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c9feff00-76d8-4b55-a86c-5b5aabd5e7a6" containerName="nova-manage" Feb 01 07:45:07 crc kubenswrapper[4650]: E0201 07:45:07.769737 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="474466cd-43fb-4e2f-8d45-c782ece71569" containerName="init" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.769745 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="474466cd-43fb-4e2f-8d45-c782ece71569" containerName="init" Feb 01 07:45:07 crc kubenswrapper[4650]: E0201 07:45:07.769761 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="520a4137-6134-409d-993e-899aa18fbd26" containerName="nova-api-log" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.769771 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="520a4137-6134-409d-993e-899aa18fbd26" containerName="nova-api-log" Feb 01 07:45:07 crc kubenswrapper[4650]: E0201 07:45:07.769801 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbd3e169-7dc4-49e7-b6f3-dd16b0e01169" containerName="collect-profiles" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.769807 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbd3e169-7dc4-49e7-b6f3-dd16b0e01169" containerName="collect-profiles" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.769968 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="520a4137-6134-409d-993e-899aa18fbd26" containerName="nova-api-api" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.769980 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="520a4137-6134-409d-993e-899aa18fbd26" containerName="nova-api-log" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.769995 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9feff00-76d8-4b55-a86c-5b5aabd5e7a6" containerName="nova-manage" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.770006 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbd3e169-7dc4-49e7-b6f3-dd16b0e01169" containerName="collect-profiles" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.770018 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="474466cd-43fb-4e2f-8d45-c782ece71569" containerName="dnsmasq-dns" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.771704 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.773657 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.773738 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.776573 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.822278 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b16176d2-df87-458e-80c6-44d95cc29889-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.822336 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b16176d2-df87-458e-80c6-44d95cc29889-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.822418 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b16176d2-df87-458e-80c6-44d95cc29889-config-data\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.822476 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b16176d2-df87-458e-80c6-44d95cc29889-logs\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.822500 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6znm\" (UniqueName: \"kubernetes.io/projected/b16176d2-df87-458e-80c6-44d95cc29889-kube-api-access-w6znm\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.822520 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b16176d2-df87-458e-80c6-44d95cc29889-public-tls-certs\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.830991 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.924467 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b16176d2-df87-458e-80c6-44d95cc29889-config-data\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.924553 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b16176d2-df87-458e-80c6-44d95cc29889-logs\") pod \"nova-api-0\" (UID: 
\"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.924581 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6znm\" (UniqueName: \"kubernetes.io/projected/b16176d2-df87-458e-80c6-44d95cc29889-kube-api-access-w6znm\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.924603 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b16176d2-df87-458e-80c6-44d95cc29889-public-tls-certs\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.924667 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b16176d2-df87-458e-80c6-44d95cc29889-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.924687 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b16176d2-df87-458e-80c6-44d95cc29889-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.926243 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b16176d2-df87-458e-80c6-44d95cc29889-logs\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.929236 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b16176d2-df87-458e-80c6-44d95cc29889-config-data\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.929725 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b16176d2-df87-458e-80c6-44d95cc29889-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.929784 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b16176d2-df87-458e-80c6-44d95cc29889-public-tls-certs\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.931064 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b16176d2-df87-458e-80c6-44d95cc29889-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.944284 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6znm\" (UniqueName: \"kubernetes.io/projected/b16176d2-df87-458e-80c6-44d95cc29889-kube-api-access-w6znm\") pod \"nova-api-0\" (UID: \"b16176d2-df87-458e-80c6-44d95cc29889\") " 
pod="openstack/nova-api-0" Feb 01 07:45:07 crc kubenswrapper[4650]: I0201 07:45:07.980396 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="520a4137-6134-409d-993e-899aa18fbd26" path="/var/lib/kubelet/pods/520a4137-6134-409d-993e-899aa18fbd26/volumes" Feb 01 07:45:08 crc kubenswrapper[4650]: I0201 07:45:08.087648 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 01 07:45:08 crc kubenswrapper[4650]: I0201 07:45:08.619253 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 01 07:45:08 crc kubenswrapper[4650]: I0201 07:45:08.717800 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b16176d2-df87-458e-80c6-44d95cc29889","Type":"ContainerStarted","Data":"c39664c7b339084a51f15640bfd4c610e0de2ee37555a1fd17900d7af61e2c72"} Feb 01 07:45:09 crc kubenswrapper[4650]: I0201 07:45:09.734612 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b16176d2-df87-458e-80c6-44d95cc29889","Type":"ContainerStarted","Data":"a58a04c0050d6e9d9e225dd0408e5c290711241c7d1f17fcdafc620a7fc1e10e"} Feb 01 07:45:09 crc kubenswrapper[4650]: I0201 07:45:09.734984 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b16176d2-df87-458e-80c6-44d95cc29889","Type":"ContainerStarted","Data":"c2b8d089ba285e8790f8e817eb980179949ca0669ed45a90a9bb5def0309ffdd"} Feb 01 07:45:09 crc kubenswrapper[4650]: I0201 07:45:09.811320 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.811295739 podStartE2EDuration="2.811295739s" podCreationTimestamp="2026-02-01 07:45:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:45:09.784615712 +0000 UTC m=+1308.507713977" watchObservedRunningTime="2026-02-01 07:45:09.811295739 +0000 UTC m=+1308.534393994" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.610019 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.698270 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbggq\" (UniqueName: \"kubernetes.io/projected/56a63441-07a6-4b3c-bee6-ccc803825470-kube-api-access-hbggq\") pod \"56a63441-07a6-4b3c-bee6-ccc803825470\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.698852 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-combined-ca-bundle\") pod \"56a63441-07a6-4b3c-bee6-ccc803825470\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.699414 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56a63441-07a6-4b3c-bee6-ccc803825470-logs\") pod \"56a63441-07a6-4b3c-bee6-ccc803825470\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.699553 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-nova-metadata-tls-certs\") pod \"56a63441-07a6-4b3c-bee6-ccc803825470\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.699735 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-config-data\") pod \"56a63441-07a6-4b3c-bee6-ccc803825470\" (UID: \"56a63441-07a6-4b3c-bee6-ccc803825470\") " Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.699808 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56a63441-07a6-4b3c-bee6-ccc803825470-logs" (OuterVolumeSpecName: "logs") pod "56a63441-07a6-4b3c-bee6-ccc803825470" (UID: "56a63441-07a6-4b3c-bee6-ccc803825470"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.700517 4650 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56a63441-07a6-4b3c-bee6-ccc803825470-logs\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.704941 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56a63441-07a6-4b3c-bee6-ccc803825470-kube-api-access-hbggq" (OuterVolumeSpecName: "kube-api-access-hbggq") pod "56a63441-07a6-4b3c-bee6-ccc803825470" (UID: "56a63441-07a6-4b3c-bee6-ccc803825470"). InnerVolumeSpecName "kube-api-access-hbggq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.730700 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-config-data" (OuterVolumeSpecName: "config-data") pod "56a63441-07a6-4b3c-bee6-ccc803825470" (UID: "56a63441-07a6-4b3c-bee6-ccc803825470"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.745364 4650 generic.go:334] "Generic (PLEG): container finished" podID="56a63441-07a6-4b3c-bee6-ccc803825470" containerID="4758c97cfe821a4579e3f979673ce7577fdd0c2a8bd533f026001a673d4b4a98" exitCode=0 Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.746335 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.746736 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"56a63441-07a6-4b3c-bee6-ccc803825470","Type":"ContainerDied","Data":"4758c97cfe821a4579e3f979673ce7577fdd0c2a8bd533f026001a673d4b4a98"} Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.746757 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"56a63441-07a6-4b3c-bee6-ccc803825470","Type":"ContainerDied","Data":"863bf0ccf6fc2cf5d0f07ff353db639b4fc717c4258e6c8775c7196fe526bd8e"} Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.746771 4650 scope.go:117] "RemoveContainer" containerID="4758c97cfe821a4579e3f979673ce7577fdd0c2a8bd533f026001a673d4b4a98" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.753086 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "56a63441-07a6-4b3c-bee6-ccc803825470" (UID: "56a63441-07a6-4b3c-bee6-ccc803825470"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.789285 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "56a63441-07a6-4b3c-bee6-ccc803825470" (UID: "56a63441-07a6-4b3c-bee6-ccc803825470"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.797904 4650 scope.go:117] "RemoveContainer" containerID="48fead3762663a5df81dacfcd7a376c3dc8691417335ae4a56681890a7faeecf" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.803317 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.803345 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbggq\" (UniqueName: \"kubernetes.io/projected/56a63441-07a6-4b3c-bee6-ccc803825470-kube-api-access-hbggq\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.803358 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.803369 4650 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/56a63441-07a6-4b3c-bee6-ccc803825470-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.819440 4650 scope.go:117] "RemoveContainer" containerID="4758c97cfe821a4579e3f979673ce7577fdd0c2a8bd533f026001a673d4b4a98" Feb 01 07:45:10 crc kubenswrapper[4650]: E0201 07:45:10.820417 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4758c97cfe821a4579e3f979673ce7577fdd0c2a8bd533f026001a673d4b4a98\": container with ID starting with 4758c97cfe821a4579e3f979673ce7577fdd0c2a8bd533f026001a673d4b4a98 not found: ID does not exist" containerID="4758c97cfe821a4579e3f979673ce7577fdd0c2a8bd533f026001a673d4b4a98" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.820458 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4758c97cfe821a4579e3f979673ce7577fdd0c2a8bd533f026001a673d4b4a98"} err="failed to get container status \"4758c97cfe821a4579e3f979673ce7577fdd0c2a8bd533f026001a673d4b4a98\": rpc error: code = NotFound desc = could not find container \"4758c97cfe821a4579e3f979673ce7577fdd0c2a8bd533f026001a673d4b4a98\": container with ID starting with 4758c97cfe821a4579e3f979673ce7577fdd0c2a8bd533f026001a673d4b4a98 not found: ID does not exist" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.820482 4650 scope.go:117] "RemoveContainer" containerID="48fead3762663a5df81dacfcd7a376c3dc8691417335ae4a56681890a7faeecf" Feb 01 07:45:10 crc kubenswrapper[4650]: E0201 07:45:10.820711 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48fead3762663a5df81dacfcd7a376c3dc8691417335ae4a56681890a7faeecf\": container with ID starting with 48fead3762663a5df81dacfcd7a376c3dc8691417335ae4a56681890a7faeecf not found: ID does not exist" containerID="48fead3762663a5df81dacfcd7a376c3dc8691417335ae4a56681890a7faeecf" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.820737 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48fead3762663a5df81dacfcd7a376c3dc8691417335ae4a56681890a7faeecf"} err="failed to get container status \"48fead3762663a5df81dacfcd7a376c3dc8691417335ae4a56681890a7faeecf\": rpc 
error: code = NotFound desc = could not find container \"48fead3762663a5df81dacfcd7a376c3dc8691417335ae4a56681890a7faeecf\": container with ID starting with 48fead3762663a5df81dacfcd7a376c3dc8691417335ae4a56681890a7faeecf not found: ID does not exist" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.965676 4650 scope.go:117] "RemoveContainer" containerID="63e7260b59a92b226b6ab4aa787fbd82123fc98b8f89bf546bd77d0c3883551e" Feb 01 07:45:10 crc kubenswrapper[4650]: I0201 07:45:10.965707 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:45:10 crc kubenswrapper[4650]: E0201 07:45:10.965944 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.083813 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.104401 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.129243 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:45:11 crc kubenswrapper[4650]: E0201 07:45:11.131105 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56a63441-07a6-4b3c-bee6-ccc803825470" containerName="nova-metadata-log" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.131126 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="56a63441-07a6-4b3c-bee6-ccc803825470" containerName="nova-metadata-log" Feb 01 07:45:11 crc kubenswrapper[4650]: E0201 07:45:11.131145 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56a63441-07a6-4b3c-bee6-ccc803825470" containerName="nova-metadata-metadata" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.131152 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="56a63441-07a6-4b3c-bee6-ccc803825470" containerName="nova-metadata-metadata" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.131319 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="56a63441-07a6-4b3c-bee6-ccc803825470" containerName="nova-metadata-metadata" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.131353 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="56a63441-07a6-4b3c-bee6-ccc803825470" containerName="nova-metadata-log" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.132394 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.135847 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.135995 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.160580 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.213238 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96wnl\" (UniqueName: \"kubernetes.io/projected/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-kube-api-access-96wnl\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.213297 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-config-data\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.213340 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-logs\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.213408 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.213428 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.315361 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96wnl\" (UniqueName: \"kubernetes.io/projected/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-kube-api-access-96wnl\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.315414 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-config-data\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.315455 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-logs\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 
07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.315942 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-logs\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.316012 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.316341 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.320303 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.320433 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-config-data\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.320681 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.330003 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96wnl\" (UniqueName: \"kubernetes.io/projected/9f8e0964-a78f-45f4-a5b0-8ecfaa391176-kube-api-access-96wnl\") pod \"nova-metadata-0\" (UID: \"9f8e0964-a78f-45f4-a5b0-8ecfaa391176\") " pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.462203 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.709485 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.760434 4650 generic.go:334] "Generic (PLEG): container finished" podID="6915bfe3-bba1-4976-a7c0-18129dae5c0c" containerID="719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de" exitCode=0 Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.760491 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.760506 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6915bfe3-bba1-4976-a7c0-18129dae5c0c","Type":"ContainerDied","Data":"719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de"} Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.760538 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6915bfe3-bba1-4976-a7c0-18129dae5c0c","Type":"ContainerDied","Data":"bd13734201924745e45c47fa93956563f9664e22d5faf1361439219d576b6652"} Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.760577 4650 scope.go:117] "RemoveContainer" containerID="719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.787299 4650 scope.go:117] "RemoveContainer" containerID="719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de" Feb 01 07:45:11 crc kubenswrapper[4650]: E0201 07:45:11.788065 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de\": container with ID starting with 719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de not found: ID does not exist" containerID="719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.788112 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de"} err="failed to get container status \"719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de\": rpc error: code = NotFound desc = could not find container \"719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de\": container with ID starting with 719f28bd31f61c77e0524873f1c2a3d1802df4773aeadb00998995149092c5de not found: ID does not exist" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.827340 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6915bfe3-bba1-4976-a7c0-18129dae5c0c-combined-ca-bundle\") pod \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\" (UID: \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\") " Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.827436 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6915bfe3-bba1-4976-a7c0-18129dae5c0c-config-data\") pod \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\" (UID: \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\") " Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.827617 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62rxn\" (UniqueName: \"kubernetes.io/projected/6915bfe3-bba1-4976-a7c0-18129dae5c0c-kube-api-access-62rxn\") pod \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\" (UID: \"6915bfe3-bba1-4976-a7c0-18129dae5c0c\") " Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.833537 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6915bfe3-bba1-4976-a7c0-18129dae5c0c-kube-api-access-62rxn" (OuterVolumeSpecName: "kube-api-access-62rxn") pod "6915bfe3-bba1-4976-a7c0-18129dae5c0c" (UID: "6915bfe3-bba1-4976-a7c0-18129dae5c0c"). InnerVolumeSpecName "kube-api-access-62rxn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.851886 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6915bfe3-bba1-4976-a7c0-18129dae5c0c-config-data" (OuterVolumeSpecName: "config-data") pod "6915bfe3-bba1-4976-a7c0-18129dae5c0c" (UID: "6915bfe3-bba1-4976-a7c0-18129dae5c0c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.857321 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6915bfe3-bba1-4976-a7c0-18129dae5c0c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6915bfe3-bba1-4976-a7c0-18129dae5c0c" (UID: "6915bfe3-bba1-4976-a7c0-18129dae5c0c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.929625 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62rxn\" (UniqueName: \"kubernetes.io/projected/6915bfe3-bba1-4976-a7c0-18129dae5c0c-kube-api-access-62rxn\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.929656 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6915bfe3-bba1-4976-a7c0-18129dae5c0c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.929669 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6915bfe3-bba1-4976-a7c0-18129dae5c0c-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.935197 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 01 07:45:11 crc kubenswrapper[4650]: I0201 07:45:11.977043 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56a63441-07a6-4b3c-bee6-ccc803825470" path="/var/lib/kubelet/pods/56a63441-07a6-4b3c-bee6-ccc803825470/volumes" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.085056 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.104264 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.131084 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:45:12 crc kubenswrapper[4650]: E0201 07:45:12.131512 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6915bfe3-bba1-4976-a7c0-18129dae5c0c" containerName="nova-scheduler-scheduler" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.131528 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="6915bfe3-bba1-4976-a7c0-18129dae5c0c" containerName="nova-scheduler-scheduler" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.131708 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="6915bfe3-bba1-4976-a7c0-18129dae5c0c" containerName="nova-scheduler-scheduler" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.132381 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.140349 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.143069 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.234315 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdsnw\" (UniqueName: \"kubernetes.io/projected/ac80d66e-9ac2-443c-a731-1dfbbe67e6d0-kube-api-access-pdsnw\") pod \"nova-scheduler-0\" (UID: \"ac80d66e-9ac2-443c-a731-1dfbbe67e6d0\") " pod="openstack/nova-scheduler-0" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.234896 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac80d66e-9ac2-443c-a731-1dfbbe67e6d0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ac80d66e-9ac2-443c-a731-1dfbbe67e6d0\") " pod="openstack/nova-scheduler-0" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.235070 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac80d66e-9ac2-443c-a731-1dfbbe67e6d0-config-data\") pod \"nova-scheduler-0\" (UID: \"ac80d66e-9ac2-443c-a731-1dfbbe67e6d0\") " pod="openstack/nova-scheduler-0" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.342463 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac80d66e-9ac2-443c-a731-1dfbbe67e6d0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ac80d66e-9ac2-443c-a731-1dfbbe67e6d0\") " pod="openstack/nova-scheduler-0" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.342905 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac80d66e-9ac2-443c-a731-1dfbbe67e6d0-config-data\") pod \"nova-scheduler-0\" (UID: \"ac80d66e-9ac2-443c-a731-1dfbbe67e6d0\") " pod="openstack/nova-scheduler-0" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.343323 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdsnw\" (UniqueName: \"kubernetes.io/projected/ac80d66e-9ac2-443c-a731-1dfbbe67e6d0-kube-api-access-pdsnw\") pod \"nova-scheduler-0\" (UID: \"ac80d66e-9ac2-443c-a731-1dfbbe67e6d0\") " pod="openstack/nova-scheduler-0" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.355950 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac80d66e-9ac2-443c-a731-1dfbbe67e6d0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ac80d66e-9ac2-443c-a731-1dfbbe67e6d0\") " pod="openstack/nova-scheduler-0" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.361302 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac80d66e-9ac2-443c-a731-1dfbbe67e6d0-config-data\") pod \"nova-scheduler-0\" (UID: \"ac80d66e-9ac2-443c-a731-1dfbbe67e6d0\") " pod="openstack/nova-scheduler-0" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.365166 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdsnw\" (UniqueName: 
\"kubernetes.io/projected/ac80d66e-9ac2-443c-a731-1dfbbe67e6d0-kube-api-access-pdsnw\") pod \"nova-scheduler-0\" (UID: \"ac80d66e-9ac2-443c-a731-1dfbbe67e6d0\") " pod="openstack/nova-scheduler-0" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.455806 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.775492 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9f8e0964-a78f-45f4-a5b0-8ecfaa391176","Type":"ContainerStarted","Data":"f49015ae35dab392029fafbbcada4839add81eb9f857b52fefd71e8ff76753a8"} Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.775748 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9f8e0964-a78f-45f4-a5b0-8ecfaa391176","Type":"ContainerStarted","Data":"b2c17f52dcb28df744524bb3916a6c773552f01dfb6c8935da23e6415f8e8f17"} Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.775758 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9f8e0964-a78f-45f4-a5b0-8ecfaa391176","Type":"ContainerStarted","Data":"526c8187e4794e636bb5ee7d9c1458eebc63738b83f1da8638ac61a8dfed53b6"} Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.806066 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.805976547 podStartE2EDuration="1.805976547s" podCreationTimestamp="2026-02-01 07:45:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:45:12.789181358 +0000 UTC m=+1311.512279603" watchObservedRunningTime="2026-02-01 07:45:12.805976547 +0000 UTC m=+1311.529074842" Feb 01 07:45:12 crc kubenswrapper[4650]: I0201 07:45:12.920084 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 01 07:45:13 crc kubenswrapper[4650]: I0201 07:45:13.791012 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ac80d66e-9ac2-443c-a731-1dfbbe67e6d0","Type":"ContainerStarted","Data":"1e034e38587c52c20a48e0a085d1ededf69d93c76dc8a2a1b950937aef91cfdd"} Feb 01 07:45:13 crc kubenswrapper[4650]: I0201 07:45:13.791388 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ac80d66e-9ac2-443c-a731-1dfbbe67e6d0","Type":"ContainerStarted","Data":"7e272a6883aa0c4e022abbf43d0dd552949ea7af545f58f37c8b35c5916a2955"} Feb 01 07:45:13 crc kubenswrapper[4650]: I0201 07:45:13.820277 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.82024494 podStartE2EDuration="1.82024494s" podCreationTimestamp="2026-02-01 07:45:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 07:45:13.806048148 +0000 UTC m=+1312.529146383" watchObservedRunningTime="2026-02-01 07:45:13.82024494 +0000 UTC m=+1312.543343205" Feb 01 07:45:13 crc kubenswrapper[4650]: I0201 07:45:13.988105 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6915bfe3-bba1-4976-a7c0-18129dae5c0c" path="/var/lib/kubelet/pods/6915bfe3-bba1-4976-a7c0-18129dae5c0c/volumes" Feb 01 07:45:16 crc kubenswrapper[4650]: I0201 07:45:16.462650 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/nova-metadata-0" Feb 01 07:45:16 crc kubenswrapper[4650]: I0201 07:45:16.463317 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 01 07:45:16 crc kubenswrapper[4650]: I0201 07:45:16.966178 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:45:16 crc kubenswrapper[4650]: I0201 07:45:16.966312 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:45:16 crc kubenswrapper[4650]: I0201 07:45:16.966495 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:45:16 crc kubenswrapper[4650]: E0201 07:45:16.967022 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:45:17 crc kubenswrapper[4650]: I0201 07:45:17.456078 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Feb 01 07:45:18 crc kubenswrapper[4650]: I0201 07:45:18.088331 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 01 07:45:18 crc kubenswrapper[4650]: I0201 07:45:18.088416 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 01 07:45:19 crc kubenswrapper[4650]: I0201 07:45:19.106253 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b16176d2-df87-458e-80c6-44d95cc29889" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.210:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 01 07:45:19 crc kubenswrapper[4650]: I0201 07:45:19.106262 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b16176d2-df87-458e-80c6-44d95cc29889" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.210:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 01 07:45:21 crc kubenswrapper[4650]: I0201 07:45:21.462753 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 01 07:45:21 crc kubenswrapper[4650]: I0201 07:45:21.464838 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 01 07:45:22 crc kubenswrapper[4650]: I0201 07:45:22.456202 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Feb 01 07:45:22 crc kubenswrapper[4650]: I0201 07:45:22.483185 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9f8e0964-a78f-45f4-a5b0-8ecfaa391176" containerName="nova-metadata-log" 
probeResult="failure" output="Get \"https://10.217.0.211:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 01 07:45:22 crc kubenswrapper[4650]: I0201 07:45:22.483248 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9f8e0964-a78f-45f4-a5b0-8ecfaa391176" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.211:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 01 07:45:22 crc kubenswrapper[4650]: I0201 07:45:22.491719 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Feb 01 07:45:22 crc kubenswrapper[4650]: I0201 07:45:22.941334 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Feb 01 07:45:24 crc kubenswrapper[4650]: I0201 07:45:24.966230 4650 scope.go:117] "RemoveContainer" containerID="63e7260b59a92b226b6ab4aa787fbd82123fc98b8f89bf546bd77d0c3883551e" Feb 01 07:45:24 crc kubenswrapper[4650]: I0201 07:45:24.966634 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:45:25 crc kubenswrapper[4650]: E0201 07:45:25.196490 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:45:25 crc kubenswrapper[4650]: I0201 07:45:25.935122 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45"} Feb 01 07:45:25 crc kubenswrapper[4650]: I0201 07:45:25.935651 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:45:25 crc kubenswrapper[4650]: I0201 07:45:25.936130 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:45:25 crc kubenswrapper[4650]: E0201 07:45:25.936502 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:45:26 crc kubenswrapper[4650]: I0201 07:45:26.951099 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:45:26 crc kubenswrapper[4650]: E0201 07:45:26.953915 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:45:27 crc kubenswrapper[4650]: I0201 07:45:27.538434 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Feb 01 07:45:28 crc 
kubenswrapper[4650]: I0201 07:45:28.097615 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 01 07:45:28 crc kubenswrapper[4650]: I0201 07:45:28.098276 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 01 07:45:28 crc kubenswrapper[4650]: I0201 07:45:28.100672 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 01 07:45:28 crc kubenswrapper[4650]: I0201 07:45:28.112811 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 01 07:45:28 crc kubenswrapper[4650]: I0201 07:45:28.976638 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 01 07:45:28 crc kubenswrapper[4650]: I0201 07:45:28.988661 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 01 07:45:30 crc kubenswrapper[4650]: I0201 07:45:30.807270 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:45:31 crc kubenswrapper[4650]: I0201 07:45:31.472449 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 01 07:45:31 crc kubenswrapper[4650]: I0201 07:45:31.474573 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 01 07:45:31 crc kubenswrapper[4650]: I0201 07:45:31.481365 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 01 07:45:31 crc kubenswrapper[4650]: I0201 07:45:31.971845 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:45:31 crc kubenswrapper[4650]: I0201 07:45:31.971918 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:45:31 crc kubenswrapper[4650]: I0201 07:45:31.972038 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:45:31 crc kubenswrapper[4650]: E0201 07:45:31.972304 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:45:32 crc kubenswrapper[4650]: I0201 07:45:32.017077 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 01 07:45:33 crc kubenswrapper[4650]: I0201 07:45:33.805806 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" 
probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:45:34 crc kubenswrapper[4650]: I0201 07:45:34.807987 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:45:36 crc kubenswrapper[4650]: I0201 07:45:36.810461 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:45:36 crc kubenswrapper[4650]: I0201 07:45:36.810943 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:45:36 crc kubenswrapper[4650]: I0201 07:45:36.812268 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 07:45:36 crc kubenswrapper[4650]: I0201 07:45:36.812305 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:45:36 crc kubenswrapper[4650]: I0201 07:45:36.812351 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" gracePeriod=30 Feb 01 07:45:36 crc kubenswrapper[4650]: I0201 07:45:36.818119 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:45:36 crc kubenswrapper[4650]: E0201 07:45:36.934953 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:45:37 crc kubenswrapper[4650]: I0201 07:45:37.069281 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" exitCode=0 Feb 01 07:45:37 crc kubenswrapper[4650]: I0201 07:45:37.069323 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45"} Feb 01 07:45:37 crc kubenswrapper[4650]: I0201 07:45:37.069387 4650 scope.go:117] "RemoveContainer" containerID="63e7260b59a92b226b6ab4aa787fbd82123fc98b8f89bf546bd77d0c3883551e" Feb 01 07:45:37 crc kubenswrapper[4650]: I0201 
07:45:37.070317 4650 scope.go:117] "RemoveContainer" containerID="3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" Feb 01 07:45:37 crc kubenswrapper[4650]: I0201 07:45:37.070383 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:45:37 crc kubenswrapper[4650]: E0201 07:45:37.070762 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:45:37 crc kubenswrapper[4650]: I0201 07:45:37.161566 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:45:37 crc kubenswrapper[4650]: I0201 07:45:37.161814 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:45:37 crc kubenswrapper[4650]: I0201 07:45:37.161929 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:45:37 crc kubenswrapper[4650]: I0201 07:45:37.162775 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"da08a9d98a15a08dc02cc770b99ef74f8ab41ac5f98a7b2acee0e642f45cbee1"} pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 01 07:45:37 crc kubenswrapper[4650]: I0201 07:45:37.162912 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" containerID="cri-o://da08a9d98a15a08dc02cc770b99ef74f8ab41ac5f98a7b2acee0e642f45cbee1" gracePeriod=600 Feb 01 07:45:38 crc kubenswrapper[4650]: I0201 07:45:38.098065 4650 generic.go:334] "Generic (PLEG): container finished" podID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerID="da08a9d98a15a08dc02cc770b99ef74f8ab41ac5f98a7b2acee0e642f45cbee1" exitCode=0 Feb 01 07:45:38 crc kubenswrapper[4650]: I0201 07:45:38.098545 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerDied","Data":"da08a9d98a15a08dc02cc770b99ef74f8ab41ac5f98a7b2acee0e642f45cbee1"} Feb 01 07:45:38 crc kubenswrapper[4650]: I0201 07:45:38.098647 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" 
event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4"} Feb 01 07:45:38 crc kubenswrapper[4650]: I0201 07:45:38.098697 4650 scope.go:117] "RemoveContainer" containerID="70e34c59087428be1d52cbbc9d3e74901ae2b55868cca05d2ac2b1cb47ec233d" Feb 01 07:45:43 crc kubenswrapper[4650]: I0201 07:45:43.966249 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:45:43 crc kubenswrapper[4650]: I0201 07:45:43.966954 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:45:43 crc kubenswrapper[4650]: I0201 07:45:43.967156 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:45:43 crc kubenswrapper[4650]: E0201 07:45:43.967671 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:45:51 crc kubenswrapper[4650]: I0201 07:45:51.976292 4650 scope.go:117] "RemoveContainer" containerID="3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" Feb 01 07:45:51 crc kubenswrapper[4650]: I0201 07:45:51.976864 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:45:52 crc kubenswrapper[4650]: E0201 07:45:52.225393 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:45:52 crc kubenswrapper[4650]: I0201 07:45:52.293663 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e"} Feb 01 07:45:52 crc kubenswrapper[4650]: I0201 07:45:52.294081 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:45:52 crc kubenswrapper[4650]: I0201 07:45:52.294457 4650 scope.go:117] "RemoveContainer" containerID="3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" Feb 01 07:45:52 crc kubenswrapper[4650]: E0201 07:45:52.294953 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" 
podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:45:53 crc kubenswrapper[4650]: I0201 07:45:53.307434 4650 scope.go:117] "RemoveContainer" containerID="3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" Feb 01 07:45:53 crc kubenswrapper[4650]: E0201 07:45:53.308435 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:45:54 crc kubenswrapper[4650]: I0201 07:45:54.323813 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" exitCode=1 Feb 01 07:45:54 crc kubenswrapper[4650]: I0201 07:45:54.323898 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e"} Feb 01 07:45:54 crc kubenswrapper[4650]: I0201 07:45:54.323976 4650 scope.go:117] "RemoveContainer" containerID="09163448d9f07bb9f58d2fec34d9fb766e1ac0eebcdb92af825ee3daa1f27558" Feb 01 07:45:54 crc kubenswrapper[4650]: I0201 07:45:54.324983 4650 scope.go:117] "RemoveContainer" containerID="3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" Feb 01 07:45:54 crc kubenswrapper[4650]: I0201 07:45:54.325054 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:45:54 crc kubenswrapper[4650]: E0201 07:45:54.325670 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:45:54 crc kubenswrapper[4650]: I0201 07:45:54.800327 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:45:55 crc kubenswrapper[4650]: I0201 07:45:55.342392 4650 scope.go:117] "RemoveContainer" containerID="3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" Feb 01 07:45:55 crc kubenswrapper[4650]: I0201 07:45:55.342423 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:45:55 crc kubenswrapper[4650]: E0201 07:45:55.342961 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" 
podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:45:56 crc kubenswrapper[4650]: I0201 07:45:56.967063 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:45:56 crc kubenswrapper[4650]: I0201 07:45:56.967867 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:45:56 crc kubenswrapper[4650]: I0201 07:45:56.968084 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:45:56 crc kubenswrapper[4650]: E0201 07:45:56.968607 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:46:06 crc kubenswrapper[4650]: I0201 07:46:06.965658 4650 scope.go:117] "RemoveContainer" containerID="3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" Feb 01 07:46:06 crc kubenswrapper[4650]: I0201 07:46:06.966235 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:46:06 crc kubenswrapper[4650]: E0201 07:46:06.966475 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:46:07 crc kubenswrapper[4650]: I0201 07:46:07.965517 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:46:07 crc kubenswrapper[4650]: I0201 07:46:07.965603 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:46:07 crc kubenswrapper[4650]: I0201 07:46:07.965719 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:46:07 crc kubenswrapper[4650]: E0201 07:46:07.967231 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for 
\"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:46:18 crc kubenswrapper[4650]: I0201 07:46:18.965549 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:46:18 crc kubenswrapper[4650]: I0201 07:46:18.966101 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:46:18 crc kubenswrapper[4650]: I0201 07:46:18.966218 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:46:18 crc kubenswrapper[4650]: E0201 07:46:18.966598 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:46:21 crc kubenswrapper[4650]: I0201 07:46:21.977614 4650 scope.go:117] "RemoveContainer" containerID="3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" Feb 01 07:46:21 crc kubenswrapper[4650]: I0201 07:46:21.978153 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:46:21 crc kubenswrapper[4650]: E0201 07:46:21.978959 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:46:24 crc kubenswrapper[4650]: I0201 07:46:24.343517 4650 scope.go:117] "RemoveContainer" containerID="1440c3a7436e9f6067cfa88648c15904656524c2196d8cd41c94596bab34bd60" Feb 01 07:46:29 crc kubenswrapper[4650]: I0201 07:46:29.965607 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:46:29 crc kubenswrapper[4650]: I0201 07:46:29.966186 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:46:29 crc kubenswrapper[4650]: I0201 07:46:29.966312 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:46:30 crc kubenswrapper[4650]: I0201 07:46:30.793059 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287"} Feb 01 07:46:30 crc kubenswrapper[4650]: I0201 07:46:30.793702 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443"} Feb 01 07:46:30 crc kubenswrapper[4650]: I0201 07:46:30.793715 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc"} Feb 01 07:46:30 crc kubenswrapper[4650]: I0201 07:46:30.793738 4650 scope.go:117] "RemoveContainer" containerID="783274ea159f840f173463aa2a205f0d3be14d0c7fe3cee9708dae75f0d07b0a" Feb 01 07:46:30 crc kubenswrapper[4650]: I0201 07:46:30.793067 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" exitCode=1 Feb 01 07:46:30 crc kubenswrapper[4650]: I0201 07:46:30.794613 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:46:30 crc kubenswrapper[4650]: E0201 07:46:30.795135 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:46:31 crc kubenswrapper[4650]: I0201 07:46:31.814086 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287"} Feb 01 07:46:31 crc kubenswrapper[4650]: I0201 07:46:31.814556 4650 scope.go:117] "RemoveContainer" containerID="6a898568b0a67140d6cebaf12dedc22f057d9c905979310e75bd93b038f53970" Feb 01 07:46:31 crc kubenswrapper[4650]: I0201 07:46:31.814099 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" exitCode=1 Feb 01 07:46:31 crc kubenswrapper[4650]: I0201 07:46:31.814659 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" exitCode=1 Feb 01 07:46:31 crc kubenswrapper[4650]: I0201 07:46:31.814689 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443"} Feb 01 07:46:31 crc kubenswrapper[4650]: I0201 07:46:31.815887 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:46:31 crc kubenswrapper[4650]: I0201 07:46:31.816002 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:46:31 crc kubenswrapper[4650]: I0201 07:46:31.816257 4650 scope.go:117] "RemoveContainer" 
containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:46:31 crc kubenswrapper[4650]: E0201 07:46:31.816889 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:46:31 crc kubenswrapper[4650]: I0201 07:46:31.920146 4650 scope.go:117] "RemoveContainer" containerID="ce26e94ae6eb1322a9b8225c7ac20ac03005262a420c9829375c3f42800f5a7a" Feb 01 07:46:32 crc kubenswrapper[4650]: I0201 07:46:32.965135 4650 scope.go:117] "RemoveContainer" containerID="3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" Feb 01 07:46:32 crc kubenswrapper[4650]: I0201 07:46:32.965524 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:46:32 crc kubenswrapper[4650]: E0201 07:46:32.965811 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:46:35 crc kubenswrapper[4650]: E0201 07:46:35.127660 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 07:46:35 crc kubenswrapper[4650]: I0201 07:46:35.869001 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:46:36 crc kubenswrapper[4650]: I0201 07:46:36.644573 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:46:36 crc kubenswrapper[4650]: E0201 07:46:36.644712 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:46:36 crc kubenswrapper[4650]: E0201 07:46:36.644990 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. 
No retries permitted until 2026-02-01 07:48:38.644973156 +0000 UTC m=+1517.368071401 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:46:37 crc kubenswrapper[4650]: I0201 07:46:37.903645 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="50273097fa73190c557fb1b573c67450a63d38490679debfafff88dd70648dc6" exitCode=1 Feb 01 07:46:37 crc kubenswrapper[4650]: I0201 07:46:37.903718 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"50273097fa73190c557fb1b573c67450a63d38490679debfafff88dd70648dc6"} Feb 01 07:46:37 crc kubenswrapper[4650]: I0201 07:46:37.904044 4650 scope.go:117] "RemoveContainer" containerID="6f66535bdcc720ca6331a25502406a022cc11d5deb1d240c85548ae491d10847" Feb 01 07:46:37 crc kubenswrapper[4650]: I0201 07:46:37.906567 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:46:37 crc kubenswrapper[4650]: I0201 07:46:37.906628 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:46:37 crc kubenswrapper[4650]: I0201 07:46:37.906648 4650 scope.go:117] "RemoveContainer" containerID="50273097fa73190c557fb1b573c67450a63d38490679debfafff88dd70648dc6" Feb 01 07:46:37 crc kubenswrapper[4650]: I0201 07:46:37.906717 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:46:37 crc kubenswrapper[4650]: E0201 07:46:37.907240 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:46:46 crc kubenswrapper[4650]: I0201 07:46:46.965806 4650 scope.go:117] "RemoveContainer" containerID="3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" Feb 01 07:46:46 crc kubenswrapper[4650]: I0201 07:46:46.966392 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:46:46 crc kubenswrapper[4650]: E0201 07:46:46.966748 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd 
pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:46:52 crc kubenswrapper[4650]: I0201 07:46:52.965767 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:46:52 crc kubenswrapper[4650]: I0201 07:46:52.966385 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:46:52 crc kubenswrapper[4650]: I0201 07:46:52.966416 4650 scope.go:117] "RemoveContainer" containerID="50273097fa73190c557fb1b573c67450a63d38490679debfafff88dd70648dc6" Feb 01 07:46:52 crc kubenswrapper[4650]: I0201 07:46:52.966493 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:46:53 crc kubenswrapper[4650]: E0201 07:46:53.158257 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:46:54 crc kubenswrapper[4650]: I0201 07:46:54.087660 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"0833a80649ab7d4f2475b1080ca5c9f01d81430b7f44565b92dc4c126f3c0af6"} Feb 01 07:46:54 crc kubenswrapper[4650]: I0201 07:46:54.088761 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:46:54 crc kubenswrapper[4650]: I0201 07:46:54.088833 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:46:54 crc kubenswrapper[4650]: I0201 07:46:54.088949 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:46:54 crc kubenswrapper[4650]: E0201 07:46:54.089322 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" 
pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:46:57 crc kubenswrapper[4650]: I0201 07:46:57.967652 4650 scope.go:117] "RemoveContainer" containerID="3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" Feb 01 07:46:57 crc kubenswrapper[4650]: I0201 07:46:57.968254 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:46:58 crc kubenswrapper[4650]: E0201 07:46:58.209411 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:46:59 crc kubenswrapper[4650]: I0201 07:46:59.170139 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed"} Feb 01 07:46:59 crc kubenswrapper[4650]: I0201 07:46:59.171523 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:46:59 crc kubenswrapper[4650]: I0201 07:46:59.171662 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:46:59 crc kubenswrapper[4650]: E0201 07:46:59.171969 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:47:00 crc kubenswrapper[4650]: I0201 07:47:00.181165 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:47:00 crc kubenswrapper[4650]: E0201 07:47:00.181540 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:47:03 crc kubenswrapper[4650]: I0201 07:47:03.815569 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:47:04 crc kubenswrapper[4650]: I0201 07:47:04.809808 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:47:06 crc kubenswrapper[4650]: I0201 07:47:06.802984 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:47:08 crc kubenswrapper[4650]: I0201 
07:47:08.966387 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:47:08 crc kubenswrapper[4650]: I0201 07:47:08.966766 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:47:08 crc kubenswrapper[4650]: I0201 07:47:08.966881 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:47:08 crc kubenswrapper[4650]: E0201 07:47:08.967352 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:47:09 crc kubenswrapper[4650]: I0201 07:47:09.807531 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:47:09 crc kubenswrapper[4650]: I0201 07:47:09.807617 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:47:09 crc kubenswrapper[4650]: I0201 07:47:09.808347 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:47:09 crc kubenswrapper[4650]: I0201 07:47:09.808492 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 07:47:09 crc kubenswrapper[4650]: I0201 07:47:09.808516 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:47:09 crc kubenswrapper[4650]: I0201 07:47:09.808545 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" gracePeriod=30 Feb 01 07:47:09 crc kubenswrapper[4650]: I0201 07:47:09.819877 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:47:10 crc kubenswrapper[4650]: E0201 07:47:10.039393 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with 
CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:47:10 crc kubenswrapper[4650]: I0201 07:47:10.291532 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" exitCode=0 Feb 01 07:47:10 crc kubenswrapper[4650]: I0201 07:47:10.291634 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed"} Feb 01 07:47:10 crc kubenswrapper[4650]: I0201 07:47:10.291726 4650 scope.go:117] "RemoveContainer" containerID="3fe3ca62e87469bd5cf93a4f73117185134e93fb3822bc3f92bd52831f313d45" Feb 01 07:47:10 crc kubenswrapper[4650]: I0201 07:47:10.292534 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:47:10 crc kubenswrapper[4650]: I0201 07:47:10.292585 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:47:10 crc kubenswrapper[4650]: E0201 07:47:10.292942 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:47:23 crc kubenswrapper[4650]: I0201 07:47:23.967189 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:47:23 crc kubenswrapper[4650]: I0201 07:47:23.968172 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:47:23 crc kubenswrapper[4650]: I0201 07:47:23.968365 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:47:23 crc kubenswrapper[4650]: E0201 07:47:23.968993 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" 
podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:47:24 crc kubenswrapper[4650]: I0201 07:47:24.454503 4650 scope.go:117] "RemoveContainer" containerID="32ad1182027a79711c7b05b9e87b965568175662fce9da229bc4f893ac741beb" Feb 01 07:47:24 crc kubenswrapper[4650]: I0201 07:47:24.512181 4650 scope.go:117] "RemoveContainer" containerID="a44f7992069ad5f19bb0410daa9f5292e190e85f2a7f8d50f7b82ee5493e1454" Feb 01 07:47:24 crc kubenswrapper[4650]: I0201 07:47:24.546387 4650 scope.go:117] "RemoveContainer" containerID="06616ed95f463e4ccf0b03c06cb4cd95c9607d8b526fe0b21e3e25ae58c2864a" Feb 01 07:47:24 crc kubenswrapper[4650]: I0201 07:47:24.581544 4650 scope.go:117] "RemoveContainer" containerID="3fe1ea57719f5cc7d5fa12a1945a5b41907c3b01e2d2c0b7ccb85779246cf245" Feb 01 07:47:24 crc kubenswrapper[4650]: I0201 07:47:24.640068 4650 scope.go:117] "RemoveContainer" containerID="4d37cf6bbb346b6469aff2307f4012b5e570dcb9bd8d3484f6044bba2bc5248e" Feb 01 07:47:24 crc kubenswrapper[4650]: I0201 07:47:24.676632 4650 scope.go:117] "RemoveContainer" containerID="79076c939504f1325536395e93ee670f57352524c1972b4dee9b8d73d9659a73" Feb 01 07:47:24 crc kubenswrapper[4650]: I0201 07:47:24.723464 4650 scope.go:117] "RemoveContainer" containerID="7328dc1c02bb9990f7c8772431212dc76e5892644429a10b8063d3eee2081556" Feb 01 07:47:24 crc kubenswrapper[4650]: I0201 07:47:24.753427 4650 scope.go:117] "RemoveContainer" containerID="0a8ef4a2aa7b93cd6fdaea9e16240a2274799d2c30db543919ffee7f3af2d11c" Feb 01 07:47:24 crc kubenswrapper[4650]: I0201 07:47:24.964808 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:47:24 crc kubenswrapper[4650]: I0201 07:47:24.964835 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:47:24 crc kubenswrapper[4650]: E0201 07:47:24.965068 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:47:37 crc kubenswrapper[4650]: I0201 07:47:37.161905 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:47:37 crc kubenswrapper[4650]: I0201 07:47:37.162607 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:47:37 crc kubenswrapper[4650]: I0201 07:47:37.965952 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:47:37 crc kubenswrapper[4650]: I0201 07:47:37.966434 4650 scope.go:117] "RemoveContainer" 
containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:47:37 crc kubenswrapper[4650]: I0201 07:47:37.966615 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:47:37 crc kubenswrapper[4650]: E0201 07:47:37.967288 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:47:39 crc kubenswrapper[4650]: I0201 07:47:39.967257 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:47:39 crc kubenswrapper[4650]: I0201 07:47:39.967721 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:47:39 crc kubenswrapper[4650]: E0201 07:47:39.968182 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:47:48 crc kubenswrapper[4650]: I0201 07:47:48.967914 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:47:48 crc kubenswrapper[4650]: I0201 07:47:48.969172 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:47:48 crc kubenswrapper[4650]: I0201 07:47:48.969548 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:47:48 crc kubenswrapper[4650]: E0201 07:47:48.970302 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:47:52 crc kubenswrapper[4650]: I0201 07:47:52.965532 4650 
scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:47:52 crc kubenswrapper[4650]: I0201 07:47:52.968116 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:47:52 crc kubenswrapper[4650]: E0201 07:47:52.968629 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:48:02 crc kubenswrapper[4650]: I0201 07:48:02.968960 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:48:02 crc kubenswrapper[4650]: I0201 07:48:02.972142 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:48:02 crc kubenswrapper[4650]: I0201 07:48:02.972682 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:48:02 crc kubenswrapper[4650]: E0201 07:48:02.973711 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:48:06 crc kubenswrapper[4650]: I0201 07:48:06.990048 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="e6dc364e85738df90c32cbd434759e3f0e7d1ab1e42c31023453e8704d13f08b" exitCode=1 Feb 01 07:48:06 crc kubenswrapper[4650]: I0201 07:48:06.990061 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"e6dc364e85738df90c32cbd434759e3f0e7d1ab1e42c31023453e8704d13f08b"} Feb 01 07:48:06 crc kubenswrapper[4650]: I0201 07:48:06.990537 4650 scope.go:117] "RemoveContainer" containerID="092c036764bc43b6c02ff54e9eb3b67f429c327f120c40c9f66cf098fe79dc37" Feb 01 07:48:06 crc kubenswrapper[4650]: I0201 07:48:06.991107 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:48:06 crc kubenswrapper[4650]: I0201 07:48:06.991163 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:48:06 crc kubenswrapper[4650]: I0201 07:48:06.991234 4650 scope.go:117] "RemoveContainer" 
containerID="e6dc364e85738df90c32cbd434759e3f0e7d1ab1e42c31023453e8704d13f08b" Feb 01 07:48:06 crc kubenswrapper[4650]: I0201 07:48:06.991256 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:48:06 crc kubenswrapper[4650]: E0201 07:48:06.991621 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:48:07 crc kubenswrapper[4650]: I0201 07:48:07.161514 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:48:07 crc kubenswrapper[4650]: I0201 07:48:07.161579 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:48:07 crc kubenswrapper[4650]: I0201 07:48:07.966945 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:48:07 crc kubenswrapper[4650]: I0201 07:48:07.967491 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:48:07 crc kubenswrapper[4650]: E0201 07:48:07.968117 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:48:19 crc kubenswrapper[4650]: I0201 07:48:19.965733 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:48:19 crc kubenswrapper[4650]: I0201 07:48:19.966364 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:48:19 crc kubenswrapper[4650]: E0201 07:48:19.966826 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to 
\"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:48:21 crc kubenswrapper[4650]: I0201 07:48:21.979141 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:48:21 crc kubenswrapper[4650]: I0201 07:48:21.979870 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:48:21 crc kubenswrapper[4650]: I0201 07:48:21.979976 4650 scope.go:117] "RemoveContainer" containerID="e6dc364e85738df90c32cbd434759e3f0e7d1ab1e42c31023453e8704d13f08b" Feb 01 07:48:21 crc kubenswrapper[4650]: I0201 07:48:21.979986 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:48:22 crc kubenswrapper[4650]: E0201 07:48:22.197341 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:48:23 crc kubenswrapper[4650]: I0201 07:48:23.200210 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"f3ddd55de1aeddf7a04dcf8fad21d6821622eb50041c19f6cc16fdd930faf590"} Feb 01 07:48:23 crc kubenswrapper[4650]: I0201 07:48:23.201574 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:48:23 crc kubenswrapper[4650]: I0201 07:48:23.201796 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:48:23 crc kubenswrapper[4650]: I0201 07:48:23.202063 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:48:23 crc kubenswrapper[4650]: E0201 07:48:23.202658 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:48:24 crc kubenswrapper[4650]: I0201 07:48:24.929650 4650 scope.go:117] "RemoveContainer" containerID="1b32ba94858cc0c3a1436431907746a3cd185a00b7f2d436e309e29710dc850e" Feb 01 07:48:24 crc kubenswrapper[4650]: I0201 07:48:24.965417 4650 scope.go:117] "RemoveContainer" containerID="cca07a3ed44421c1c2ddae2db88b3fa3f088622070c2075bd1008da4c9b836d4" Feb 01 07:48:25 crc kubenswrapper[4650]: I0201 07:48:25.001003 4650 scope.go:117] "RemoveContainer" containerID="2d7be25a36bfba8dcfd682fcb26b455bbe9583c5afec760041cc8bcb3f5e8526" Feb 01 07:48:25 crc kubenswrapper[4650]: I0201 07:48:25.031143 4650 scope.go:117] "RemoveContainer" containerID="16e1f0a1172f0b931837eee6c283685a5230303871b51476c961200d508c6bd7" Feb 01 07:48:32 crc kubenswrapper[4650]: I0201 07:48:32.968095 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:48:32 crc kubenswrapper[4650]: I0201 07:48:32.971422 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:48:32 crc kubenswrapper[4650]: E0201 07:48:32.972096 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:48:35 crc kubenswrapper[4650]: I0201 07:48:35.966945 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:48:35 crc kubenswrapper[4650]: I0201 07:48:35.967442 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:48:35 crc kubenswrapper[4650]: I0201 07:48:35.967626 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:48:35 crc kubenswrapper[4650]: E0201 07:48:35.968233 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:48:37 crc kubenswrapper[4650]: I0201 07:48:37.161011 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:48:37 crc kubenswrapper[4650]: I0201 07:48:37.161092 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:48:37 crc kubenswrapper[4650]: I0201 07:48:37.161143 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:48:37 crc kubenswrapper[4650]: I0201 07:48:37.161955 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4"} pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 01 07:48:37 crc kubenswrapper[4650]: I0201 07:48:37.162018 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" containerID="cri-o://ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" gracePeriod=600 Feb 01 07:48:37 crc kubenswrapper[4650]: E0201 07:48:37.282072 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:48:37 crc kubenswrapper[4650]: I0201 07:48:37.359145 4650 generic.go:334] "Generic (PLEG): container finished" podID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" exitCode=0 Feb 01 07:48:37 crc kubenswrapper[4650]: I0201 07:48:37.359213 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerDied","Data":"ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4"} Feb 01 07:48:37 crc kubenswrapper[4650]: I0201 07:48:37.359554 4650 scope.go:117] "RemoveContainer" containerID="da08a9d98a15a08dc02cc770b99ef74f8ab41ac5f98a7b2acee0e642f45cbee1" Feb 01 07:48:37 crc kubenswrapper[4650]: I0201 07:48:37.360384 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:48:37 crc kubenswrapper[4650]: E0201 07:48:37.360714 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:48:38 crc kubenswrapper[4650]: I0201 07:48:38.647285 4650 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:48:38 crc kubenswrapper[4650]: E0201 07:48:38.647522 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:48:38 crc kubenswrapper[4650]: E0201 07:48:38.647644 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:50:40.647609764 +0000 UTC m=+1639.370708049 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:48:38 crc kubenswrapper[4650]: E0201 07:48:38.871393 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 07:48:39 crc kubenswrapper[4650]: I0201 07:48:39.393623 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:48:43 crc kubenswrapper[4650]: I0201 07:48:43.965480 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:48:43 crc kubenswrapper[4650]: I0201 07:48:43.965906 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:48:44 crc kubenswrapper[4650]: E0201 07:48:44.177523 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:48:44 crc kubenswrapper[4650]: I0201 07:48:44.467567 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871"} Feb 01 07:48:44 crc kubenswrapper[4650]: I0201 07:48:44.468431 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:48:44 crc kubenswrapper[4650]: I0201 07:48:44.469558 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:48:44 crc kubenswrapper[4650]: E0201 07:48:44.469869 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" 
podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:48:45 crc kubenswrapper[4650]: I0201 07:48:45.483555 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" exitCode=1 Feb 01 07:48:45 crc kubenswrapper[4650]: I0201 07:48:45.483618 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871"} Feb 01 07:48:45 crc kubenswrapper[4650]: I0201 07:48:45.483659 4650 scope.go:117] "RemoveContainer" containerID="d3783b493d17229051393fde8c84a3ba89aa5c24813748f60684bf75114fa02e" Feb 01 07:48:45 crc kubenswrapper[4650]: I0201 07:48:45.485450 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:48:45 crc kubenswrapper[4650]: I0201 07:48:45.486704 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:48:45 crc kubenswrapper[4650]: E0201 07:48:45.487234 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:48:45 crc kubenswrapper[4650]: I0201 07:48:45.800207 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:48:46 crc kubenswrapper[4650]: I0201 07:48:46.495291 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:48:46 crc kubenswrapper[4650]: I0201 07:48:46.495323 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:48:46 crc kubenswrapper[4650]: E0201 07:48:46.495718 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:48:47 crc kubenswrapper[4650]: I0201 07:48:47.507634 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:48:47 crc kubenswrapper[4650]: I0201 07:48:47.508095 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:48:47 crc kubenswrapper[4650]: E0201 07:48:47.508556 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s 
restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:48:48 crc kubenswrapper[4650]: I0201 07:48:48.966326 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:48:48 crc kubenswrapper[4650]: I0201 07:48:48.966736 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:48:48 crc kubenswrapper[4650]: I0201 07:48:48.966917 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:48:48 crc kubenswrapper[4650]: E0201 07:48:48.967545 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:48:50 crc kubenswrapper[4650]: I0201 07:48:50.965370 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:48:50 crc kubenswrapper[4650]: E0201 07:48:50.966101 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:48:52 crc kubenswrapper[4650]: I0201 07:48:52.959446 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mqsp9"] Feb 01 07:48:52 crc kubenswrapper[4650]: I0201 07:48:52.963397 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:48:53 crc kubenswrapper[4650]: I0201 07:48:53.012821 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mqsp9"] Feb 01 07:48:53 crc kubenswrapper[4650]: I0201 07:48:53.037012 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-utilities\") pod \"redhat-operators-mqsp9\" (UID: \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\") " pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:48:53 crc kubenswrapper[4650]: I0201 07:48:53.037102 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqjsq\" (UniqueName: \"kubernetes.io/projected/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-kube-api-access-nqjsq\") pod \"redhat-operators-mqsp9\" (UID: \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\") " pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:48:53 crc kubenswrapper[4650]: I0201 07:48:53.037269 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-catalog-content\") pod \"redhat-operators-mqsp9\" (UID: \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\") " pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:48:53 crc kubenswrapper[4650]: I0201 07:48:53.139420 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-utilities\") pod \"redhat-operators-mqsp9\" (UID: \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\") " pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:48:53 crc kubenswrapper[4650]: I0201 07:48:53.139525 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqjsq\" (UniqueName: \"kubernetes.io/projected/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-kube-api-access-nqjsq\") pod \"redhat-operators-mqsp9\" (UID: \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\") " pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:48:53 crc kubenswrapper[4650]: I0201 07:48:53.139664 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-catalog-content\") pod \"redhat-operators-mqsp9\" (UID: \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\") " pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:48:53 crc kubenswrapper[4650]: I0201 07:48:53.140088 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-utilities\") pod \"redhat-operators-mqsp9\" (UID: \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\") " pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:48:53 crc kubenswrapper[4650]: I0201 07:48:53.140104 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-catalog-content\") pod \"redhat-operators-mqsp9\" (UID: \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\") " pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:48:53 crc kubenswrapper[4650]: I0201 07:48:53.162683 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-nqjsq\" (UniqueName: \"kubernetes.io/projected/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-kube-api-access-nqjsq\") pod \"redhat-operators-mqsp9\" (UID: \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\") " pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:48:53 crc kubenswrapper[4650]: I0201 07:48:53.289307 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:48:53 crc kubenswrapper[4650]: I0201 07:48:53.823075 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mqsp9"] Feb 01 07:48:54 crc kubenswrapper[4650]: I0201 07:48:54.567620 4650 generic.go:334] "Generic (PLEG): container finished" podID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" containerID="4265b681d525c93584e0100917abdd0f1072fa83963a6200172f63064c379543" exitCode=0 Feb 01 07:48:54 crc kubenswrapper[4650]: I0201 07:48:54.567946 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mqsp9" event={"ID":"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb","Type":"ContainerDied","Data":"4265b681d525c93584e0100917abdd0f1072fa83963a6200172f63064c379543"} Feb 01 07:48:54 crc kubenswrapper[4650]: I0201 07:48:54.567973 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mqsp9" event={"ID":"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb","Type":"ContainerStarted","Data":"55542c5f6a9ad897a65c9fefabd662453b55f03fdcb3651f49081f50a31c25e1"} Feb 01 07:48:55 crc kubenswrapper[4650]: I0201 07:48:55.591879 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mqsp9" event={"ID":"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb","Type":"ContainerStarted","Data":"ab563f68012d52bf265010026bcd19ffa807404b5629ca9326240809e1c732b6"} Feb 01 07:48:59 crc kubenswrapper[4650]: I0201 07:48:59.646128 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="0833a80649ab7d4f2475b1080ca5c9f01d81430b7f44565b92dc4c126f3c0af6" exitCode=1 Feb 01 07:48:59 crc kubenswrapper[4650]: I0201 07:48:59.646174 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"0833a80649ab7d4f2475b1080ca5c9f01d81430b7f44565b92dc4c126f3c0af6"} Feb 01 07:48:59 crc kubenswrapper[4650]: I0201 07:48:59.646766 4650 scope.go:117] "RemoveContainer" containerID="50273097fa73190c557fb1b573c67450a63d38490679debfafff88dd70648dc6" Feb 01 07:48:59 crc kubenswrapper[4650]: I0201 07:48:59.648061 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:48:59 crc kubenswrapper[4650]: I0201 07:48:59.648159 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:48:59 crc kubenswrapper[4650]: I0201 07:48:59.648201 4650 scope.go:117] "RemoveContainer" containerID="0833a80649ab7d4f2475b1080ca5c9f01d81430b7f44565b92dc4c126f3c0af6" Feb 01 07:48:59 crc kubenswrapper[4650]: I0201 07:48:59.648331 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:48:59 crc kubenswrapper[4650]: E0201 07:48:59.648944 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:49:00 crc kubenswrapper[4650]: I0201 07:49:00.656604 4650 generic.go:334] "Generic (PLEG): container finished" podID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" containerID="ab563f68012d52bf265010026bcd19ffa807404b5629ca9326240809e1c732b6" exitCode=0 Feb 01 07:49:00 crc kubenswrapper[4650]: I0201 07:49:00.656676 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mqsp9" event={"ID":"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb","Type":"ContainerDied","Data":"ab563f68012d52bf265010026bcd19ffa807404b5629ca9326240809e1c732b6"} Feb 01 07:49:00 crc kubenswrapper[4650]: I0201 07:49:00.965801 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:49:00 crc kubenswrapper[4650]: I0201 07:49:00.965843 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:49:00 crc kubenswrapper[4650]: E0201 07:49:00.966266 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:49:01 crc kubenswrapper[4650]: I0201 07:49:01.683370 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mqsp9" event={"ID":"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb","Type":"ContainerStarted","Data":"35f7c0d98ffc0304f12248aeac509784c11235408b2b3523350556e1e305e70a"} Feb 01 07:49:01 crc kubenswrapper[4650]: I0201 07:49:01.704359 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mqsp9" podStartSLOduration=3.200853683 podStartE2EDuration="9.704342541s" podCreationTimestamp="2026-02-01 07:48:52 +0000 UTC" firstStartedPulling="2026-02-01 07:48:54.569474842 +0000 UTC m=+1533.292573097" lastFinishedPulling="2026-02-01 07:49:01.07296368 +0000 UTC m=+1539.796061955" observedRunningTime="2026-02-01 07:49:01.697699757 +0000 UTC m=+1540.420798012" watchObservedRunningTime="2026-02-01 07:49:01.704342541 +0000 UTC m=+1540.427440796" Feb 01 07:49:01 crc kubenswrapper[4650]: I0201 07:49:01.972861 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:49:01 crc kubenswrapper[4650]: E0201 07:49:01.973201 
4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:49:03 crc kubenswrapper[4650]: I0201 07:49:03.290454 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:49:03 crc kubenswrapper[4650]: I0201 07:49:03.290521 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:49:04 crc kubenswrapper[4650]: I0201 07:49:04.345188 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mqsp9" podUID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" containerName="registry-server" probeResult="failure" output=< Feb 01 07:49:04 crc kubenswrapper[4650]: timeout: failed to connect service ":50051" within 1s Feb 01 07:49:04 crc kubenswrapper[4650]: > Feb 01 07:49:13 crc kubenswrapper[4650]: I0201 07:49:13.964980 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:49:13 crc kubenswrapper[4650]: E0201 07:49:13.965802 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:49:13 crc kubenswrapper[4650]: I0201 07:49:13.965942 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:49:13 crc kubenswrapper[4650]: I0201 07:49:13.965994 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:49:13 crc kubenswrapper[4650]: I0201 07:49:13.966017 4650 scope.go:117] "RemoveContainer" containerID="0833a80649ab7d4f2475b1080ca5c9f01d81430b7f44565b92dc4c126f3c0af6" Feb 01 07:49:13 crc kubenswrapper[4650]: I0201 07:49:13.966099 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:49:13 crc kubenswrapper[4650]: E0201 07:49:13.966406 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:49:14 crc kubenswrapper[4650]: I0201 07:49:14.343116 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mqsp9" podUID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" containerName="registry-server" probeResult="failure" output=< Feb 01 07:49:14 crc kubenswrapper[4650]: timeout: failed to connect service ":50051" within 1s Feb 01 07:49:14 crc kubenswrapper[4650]: > Feb 01 07:49:15 crc kubenswrapper[4650]: I0201 07:49:15.964936 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:49:15 crc kubenswrapper[4650]: I0201 07:49:15.965329 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:49:15 crc kubenswrapper[4650]: E0201 07:49:15.965694 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:49:23 crc kubenswrapper[4650]: I0201 07:49:23.381306 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:49:23 crc kubenswrapper[4650]: I0201 07:49:23.448171 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:49:24 crc kubenswrapper[4650]: I0201 07:49:24.158227 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mqsp9"] Feb 01 07:49:24 crc kubenswrapper[4650]: I0201 07:49:24.918852 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mqsp9" podUID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" containerName="registry-server" containerID="cri-o://35f7c0d98ffc0304f12248aeac509784c11235408b2b3523350556e1e305e70a" gracePeriod=2 Feb 01 07:49:24 crc kubenswrapper[4650]: I0201 07:49:24.965634 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:49:24 crc kubenswrapper[4650]: E0201 07:49:24.966121 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:49:24 crc kubenswrapper[4650]: I0201 07:49:24.966185 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:49:24 crc kubenswrapper[4650]: I0201 07:49:24.966305 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:49:24 crc kubenswrapper[4650]: 
I0201 07:49:24.966355 4650 scope.go:117] "RemoveContainer" containerID="0833a80649ab7d4f2475b1080ca5c9f01d81430b7f44565b92dc4c126f3c0af6" Feb 01 07:49:24 crc kubenswrapper[4650]: I0201 07:49:24.966501 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.146582 4650 scope.go:117] "RemoveContainer" containerID="c23a4e072c8b04bf5e19d74440d7967f46d9f4d8232816b798b219932cdd38dd" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.175230 4650 scope.go:117] "RemoveContainer" containerID="37cd4406ada2c9a6fa17ddf90e372c2026b5e351b6825103b029e49fa4c52eaf" Feb 01 07:49:25 crc kubenswrapper[4650]: E0201 07:49:25.215468 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.453560 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.577942 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-catalog-content\") pod \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\" (UID: \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\") " Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.578186 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqjsq\" (UniqueName: \"kubernetes.io/projected/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-kube-api-access-nqjsq\") pod \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\" (UID: \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\") " Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.578369 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-utilities\") pod \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\" (UID: \"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb\") " Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.580102 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-utilities" (OuterVolumeSpecName: "utilities") pod "a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" (UID: "a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.581248 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.589852 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-kube-api-access-nqjsq" (OuterVolumeSpecName: "kube-api-access-nqjsq") pod "a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" (UID: "a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb"). InnerVolumeSpecName "kube-api-access-nqjsq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.683620 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqjsq\" (UniqueName: \"kubernetes.io/projected/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-kube-api-access-nqjsq\") on node \"crc\" DevicePath \"\"" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.737449 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" (UID: "a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.785079 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.932438 4650 generic.go:334] "Generic (PLEG): container finished" podID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" containerID="35f7c0d98ffc0304f12248aeac509784c11235408b2b3523350556e1e305e70a" exitCode=0 Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.932554 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mqsp9" event={"ID":"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb","Type":"ContainerDied","Data":"35f7c0d98ffc0304f12248aeac509784c11235408b2b3523350556e1e305e70a"} Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.932592 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mqsp9" event={"ID":"a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb","Type":"ContainerDied","Data":"55542c5f6a9ad897a65c9fefabd662453b55f03fdcb3651f49081f50a31c25e1"} Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.932693 4650 scope.go:117] "RemoveContainer" containerID="35f7c0d98ffc0304f12248aeac509784c11235408b2b3523350556e1e305e70a" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.933106 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mqsp9" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.942499 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"be340d6125db32ca6fa300a2a4dbcacc27c7219c0bcf3969b185e0315c851561"} Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.943251 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.943310 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.943396 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:49:25 crc kubenswrapper[4650]: E0201 07:49:25.943723 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:49:25 crc kubenswrapper[4650]: I0201 07:49:25.963789 4650 scope.go:117] "RemoveContainer" containerID="ab563f68012d52bf265010026bcd19ffa807404b5629ca9326240809e1c732b6" Feb 01 07:49:26 crc kubenswrapper[4650]: I0201 07:49:26.019348 4650 scope.go:117] "RemoveContainer" containerID="4265b681d525c93584e0100917abdd0f1072fa83963a6200172f63064c379543" Feb 01 07:49:26 crc kubenswrapper[4650]: I0201 07:49:26.029017 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mqsp9"] Feb 01 07:49:26 crc kubenswrapper[4650]: I0201 07:49:26.039082 4650 scope.go:117] "RemoveContainer" containerID="35f7c0d98ffc0304f12248aeac509784c11235408b2b3523350556e1e305e70a" Feb 01 07:49:26 crc kubenswrapper[4650]: E0201 07:49:26.039747 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35f7c0d98ffc0304f12248aeac509784c11235408b2b3523350556e1e305e70a\": container with ID starting with 35f7c0d98ffc0304f12248aeac509784c11235408b2b3523350556e1e305e70a not found: ID does not exist" containerID="35f7c0d98ffc0304f12248aeac509784c11235408b2b3523350556e1e305e70a" Feb 01 07:49:26 crc kubenswrapper[4650]: I0201 07:49:26.039823 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35f7c0d98ffc0304f12248aeac509784c11235408b2b3523350556e1e305e70a"} err="failed to get container status \"35f7c0d98ffc0304f12248aeac509784c11235408b2b3523350556e1e305e70a\": rpc error: code = NotFound desc = could not find container \"35f7c0d98ffc0304f12248aeac509784c11235408b2b3523350556e1e305e70a\": container with ID starting with 35f7c0d98ffc0304f12248aeac509784c11235408b2b3523350556e1e305e70a not found: ID does not exist" Feb 01 07:49:26 crc 
kubenswrapper[4650]: I0201 07:49:26.039858 4650 scope.go:117] "RemoveContainer" containerID="ab563f68012d52bf265010026bcd19ffa807404b5629ca9326240809e1c732b6" Feb 01 07:49:26 crc kubenswrapper[4650]: E0201 07:49:26.040678 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab563f68012d52bf265010026bcd19ffa807404b5629ca9326240809e1c732b6\": container with ID starting with ab563f68012d52bf265010026bcd19ffa807404b5629ca9326240809e1c732b6 not found: ID does not exist" containerID="ab563f68012d52bf265010026bcd19ffa807404b5629ca9326240809e1c732b6" Feb 01 07:49:26 crc kubenswrapper[4650]: I0201 07:49:26.040721 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab563f68012d52bf265010026bcd19ffa807404b5629ca9326240809e1c732b6"} err="failed to get container status \"ab563f68012d52bf265010026bcd19ffa807404b5629ca9326240809e1c732b6\": rpc error: code = NotFound desc = could not find container \"ab563f68012d52bf265010026bcd19ffa807404b5629ca9326240809e1c732b6\": container with ID starting with ab563f68012d52bf265010026bcd19ffa807404b5629ca9326240809e1c732b6 not found: ID does not exist" Feb 01 07:49:26 crc kubenswrapper[4650]: I0201 07:49:26.040736 4650 scope.go:117] "RemoveContainer" containerID="4265b681d525c93584e0100917abdd0f1072fa83963a6200172f63064c379543" Feb 01 07:49:26 crc kubenswrapper[4650]: I0201 07:49:26.041957 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mqsp9"] Feb 01 07:49:26 crc kubenswrapper[4650]: E0201 07:49:26.042256 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4265b681d525c93584e0100917abdd0f1072fa83963a6200172f63064c379543\": container with ID starting with 4265b681d525c93584e0100917abdd0f1072fa83963a6200172f63064c379543 not found: ID does not exist" containerID="4265b681d525c93584e0100917abdd0f1072fa83963a6200172f63064c379543" Feb 01 07:49:26 crc kubenswrapper[4650]: I0201 07:49:26.042280 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4265b681d525c93584e0100917abdd0f1072fa83963a6200172f63064c379543"} err="failed to get container status \"4265b681d525c93584e0100917abdd0f1072fa83963a6200172f63064c379543\": rpc error: code = NotFound desc = could not find container \"4265b681d525c93584e0100917abdd0f1072fa83963a6200172f63064c379543\": container with ID starting with 4265b681d525c93584e0100917abdd0f1072fa83963a6200172f63064c379543 not found: ID does not exist" Feb 01 07:49:27 crc kubenswrapper[4650]: I0201 07:49:27.975191 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" path="/var/lib/kubelet/pods/a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb/volumes" Feb 01 07:49:28 crc kubenswrapper[4650]: I0201 07:49:28.965112 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:49:28 crc kubenswrapper[4650]: I0201 07:49:28.965135 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:49:28 crc kubenswrapper[4650]: E0201 07:49:28.965430 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd 
pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:49:36 crc kubenswrapper[4650]: I0201 07:49:36.965143 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:49:36 crc kubenswrapper[4650]: I0201 07:49:36.965685 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:49:36 crc kubenswrapper[4650]: I0201 07:49:36.965792 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:49:36 crc kubenswrapper[4650]: E0201 07:49:36.966154 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:49:36 crc kubenswrapper[4650]: I0201 07:49:36.966480 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:49:36 crc kubenswrapper[4650]: E0201 07:49:36.966657 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:49:40 crc kubenswrapper[4650]: I0201 07:49:40.965628 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:49:40 crc kubenswrapper[4650]: I0201 07:49:40.966085 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:49:40 crc kubenswrapper[4650]: E0201 07:49:40.966608 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.485300 4650 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/certified-operators-bddch"] Feb 01 07:49:41 crc kubenswrapper[4650]: E0201 07:49:41.486358 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" containerName="extract-utilities" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.486375 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" containerName="extract-utilities" Feb 01 07:49:41 crc kubenswrapper[4650]: E0201 07:49:41.486387 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" containerName="extract-content" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.486393 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" containerName="extract-content" Feb 01 07:49:41 crc kubenswrapper[4650]: E0201 07:49:41.486418 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" containerName="registry-server" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.486424 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" containerName="registry-server" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.486603 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4f8c7a9-d8b6-4894-bf43-5bad0538b5bb" containerName="registry-server" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.488412 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.495543 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bddch"] Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.516255 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42d53310-48e3-4c5b-846c-b5f2cf1f1877-utilities\") pod \"certified-operators-bddch\" (UID: \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\") " pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.516332 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42d53310-48e3-4c5b-846c-b5f2cf1f1877-catalog-content\") pod \"certified-operators-bddch\" (UID: \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\") " pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.516389 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwhvv\" (UniqueName: \"kubernetes.io/projected/42d53310-48e3-4c5b-846c-b5f2cf1f1877-kube-api-access-dwhvv\") pod \"certified-operators-bddch\" (UID: \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\") " pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.620270 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42d53310-48e3-4c5b-846c-b5f2cf1f1877-catalog-content\") pod \"certified-operators-bddch\" (UID: \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\") " pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.620373 4650 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwhvv\" (UniqueName: \"kubernetes.io/projected/42d53310-48e3-4c5b-846c-b5f2cf1f1877-kube-api-access-dwhvv\") pod \"certified-operators-bddch\" (UID: \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\") " pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.620536 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42d53310-48e3-4c5b-846c-b5f2cf1f1877-utilities\") pod \"certified-operators-bddch\" (UID: \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\") " pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.620921 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42d53310-48e3-4c5b-846c-b5f2cf1f1877-utilities\") pod \"certified-operators-bddch\" (UID: \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\") " pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.621155 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42d53310-48e3-4c5b-846c-b5f2cf1f1877-catalog-content\") pod \"certified-operators-bddch\" (UID: \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\") " pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.641011 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwhvv\" (UniqueName: \"kubernetes.io/projected/42d53310-48e3-4c5b-846c-b5f2cf1f1877-kube-api-access-dwhvv\") pod \"certified-operators-bddch\" (UID: \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\") " pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:41 crc kubenswrapper[4650]: I0201 07:49:41.814517 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:42 crc kubenswrapper[4650]: I0201 07:49:42.301166 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bddch"] Feb 01 07:49:43 crc kubenswrapper[4650]: I0201 07:49:43.126930 4650 generic.go:334] "Generic (PLEG): container finished" podID="42d53310-48e3-4c5b-846c-b5f2cf1f1877" containerID="33a99130b20c6afc09667bb0e0d4bb2a866fbdfffa1153d9685077a36d8643a6" exitCode=0 Feb 01 07:49:43 crc kubenswrapper[4650]: I0201 07:49:43.127206 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bddch" event={"ID":"42d53310-48e3-4c5b-846c-b5f2cf1f1877","Type":"ContainerDied","Data":"33a99130b20c6afc09667bb0e0d4bb2a866fbdfffa1153d9685077a36d8643a6"} Feb 01 07:49:43 crc kubenswrapper[4650]: I0201 07:49:43.127325 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bddch" event={"ID":"42d53310-48e3-4c5b-846c-b5f2cf1f1877","Type":"ContainerStarted","Data":"92e797671b5fb43e5b195019549f11be3a7df5caf7abcc8041f38eb41466a900"} Feb 01 07:49:43 crc kubenswrapper[4650]: I0201 07:49:43.130929 4650 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 01 07:49:44 crc kubenswrapper[4650]: I0201 07:49:44.143324 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bddch" event={"ID":"42d53310-48e3-4c5b-846c-b5f2cf1f1877","Type":"ContainerStarted","Data":"4a6d048e5b038bff7495169acf1dfcddd83fa81f886721d45dff3bfa9e02305d"} Feb 01 07:49:46 crc kubenswrapper[4650]: I0201 07:49:46.209193 4650 generic.go:334] "Generic (PLEG): container finished" podID="42d53310-48e3-4c5b-846c-b5f2cf1f1877" containerID="4a6d048e5b038bff7495169acf1dfcddd83fa81f886721d45dff3bfa9e02305d" exitCode=0 Feb 01 07:49:46 crc kubenswrapper[4650]: I0201 07:49:46.209663 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bddch" event={"ID":"42d53310-48e3-4c5b-846c-b5f2cf1f1877","Type":"ContainerDied","Data":"4a6d048e5b038bff7495169acf1dfcddd83fa81f886721d45dff3bfa9e02305d"} Feb 01 07:49:47 crc kubenswrapper[4650]: I0201 07:49:47.220172 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bddch" event={"ID":"42d53310-48e3-4c5b-846c-b5f2cf1f1877","Type":"ContainerStarted","Data":"a3c3fa6891842e2957d1a45575e971701e741374c95516c76ae41b44c9ed05f8"} Feb 01 07:49:47 crc kubenswrapper[4650]: I0201 07:49:47.264123 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bddch" podStartSLOduration=2.7392334050000002 podStartE2EDuration="6.264101868s" podCreationTimestamp="2026-02-01 07:49:41 +0000 UTC" firstStartedPulling="2026-02-01 07:49:43.129920514 +0000 UTC m=+1581.853018769" lastFinishedPulling="2026-02-01 07:49:46.654788987 +0000 UTC m=+1585.377887232" observedRunningTime="2026-02-01 07:49:47.2542706 +0000 UTC m=+1585.977368865" watchObservedRunningTime="2026-02-01 07:49:47.264101868 +0000 UTC m=+1585.987200133" Feb 01 07:49:47 crc kubenswrapper[4650]: I0201 07:49:47.966819 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:49:47 crc kubenswrapper[4650]: E0201 07:49:47.967770 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:49:49 crc kubenswrapper[4650]: I0201 07:49:49.966370 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:49:49 crc kubenswrapper[4650]: I0201 07:49:49.966708 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:49:49 crc kubenswrapper[4650]: I0201 07:49:49.966824 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:49:49 crc kubenswrapper[4650]: E0201 07:49:49.967180 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:49:51 crc kubenswrapper[4650]: I0201 07:49:51.814747 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:51 crc kubenswrapper[4650]: I0201 07:49:51.815160 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:51 crc kubenswrapper[4650]: I0201 07:49:51.894010 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:51 crc kubenswrapper[4650]: I0201 07:49:51.978831 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:49:51 crc kubenswrapper[4650]: I0201 07:49:51.978870 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:49:52 crc kubenswrapper[4650]: E0201 07:49:52.214970 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:49:52 crc kubenswrapper[4650]: I0201 07:49:52.271566 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b"} Feb 01 07:49:52 crc kubenswrapper[4650]: I0201 07:49:52.271916 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:49:52 crc 
kubenswrapper[4650]: I0201 07:49:52.272533 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:49:52 crc kubenswrapper[4650]: E0201 07:49:52.272796 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:49:52 crc kubenswrapper[4650]: I0201 07:49:52.332214 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:52 crc kubenswrapper[4650]: I0201 07:49:52.393578 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bddch"] Feb 01 07:49:53 crc kubenswrapper[4650]: I0201 07:49:53.280003 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:49:53 crc kubenswrapper[4650]: E0201 07:49:53.280212 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:49:54 crc kubenswrapper[4650]: I0201 07:49:54.291419 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bddch" podUID="42d53310-48e3-4c5b-846c-b5f2cf1f1877" containerName="registry-server" containerID="cri-o://a3c3fa6891842e2957d1a45575e971701e741374c95516c76ae41b44c9ed05f8" gracePeriod=2 Feb 01 07:49:54 crc kubenswrapper[4650]: I0201 07:49:54.812597 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:54 crc kubenswrapper[4650]: I0201 07:49:54.879140 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwhvv\" (UniqueName: \"kubernetes.io/projected/42d53310-48e3-4c5b-846c-b5f2cf1f1877-kube-api-access-dwhvv\") pod \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\" (UID: \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\") " Feb 01 07:49:54 crc kubenswrapper[4650]: I0201 07:49:54.879229 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42d53310-48e3-4c5b-846c-b5f2cf1f1877-utilities\") pod \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\" (UID: \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\") " Feb 01 07:49:54 crc kubenswrapper[4650]: I0201 07:49:54.879308 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42d53310-48e3-4c5b-846c-b5f2cf1f1877-catalog-content\") pod \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\" (UID: \"42d53310-48e3-4c5b-846c-b5f2cf1f1877\") " Feb 01 07:49:54 crc kubenswrapper[4650]: I0201 07:49:54.880667 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42d53310-48e3-4c5b-846c-b5f2cf1f1877-utilities" (OuterVolumeSpecName: "utilities") pod "42d53310-48e3-4c5b-846c-b5f2cf1f1877" (UID: "42d53310-48e3-4c5b-846c-b5f2cf1f1877"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:49:54 crc kubenswrapper[4650]: I0201 07:49:54.887570 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42d53310-48e3-4c5b-846c-b5f2cf1f1877-kube-api-access-dwhvv" (OuterVolumeSpecName: "kube-api-access-dwhvv") pod "42d53310-48e3-4c5b-846c-b5f2cf1f1877" (UID: "42d53310-48e3-4c5b-846c-b5f2cf1f1877"). InnerVolumeSpecName "kube-api-access-dwhvv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:49:54 crc kubenswrapper[4650]: I0201 07:49:54.954847 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42d53310-48e3-4c5b-846c-b5f2cf1f1877-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "42d53310-48e3-4c5b-846c-b5f2cf1f1877" (UID: "42d53310-48e3-4c5b-846c-b5f2cf1f1877"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:49:54 crc kubenswrapper[4650]: I0201 07:49:54.981480 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42d53310-48e3-4c5b-846c-b5f2cf1f1877-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:49:54 crc kubenswrapper[4650]: I0201 07:49:54.981512 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42d53310-48e3-4c5b-846c-b5f2cf1f1877-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:49:54 crc kubenswrapper[4650]: I0201 07:49:54.981526 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwhvv\" (UniqueName: \"kubernetes.io/projected/42d53310-48e3-4c5b-846c-b5f2cf1f1877-kube-api-access-dwhvv\") on node \"crc\" DevicePath \"\"" Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.305122 4650 generic.go:334] "Generic (PLEG): container finished" podID="42d53310-48e3-4c5b-846c-b5f2cf1f1877" containerID="a3c3fa6891842e2957d1a45575e971701e741374c95516c76ae41b44c9ed05f8" exitCode=0 Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.305138 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bddch" Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.305203 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bddch" event={"ID":"42d53310-48e3-4c5b-846c-b5f2cf1f1877","Type":"ContainerDied","Data":"a3c3fa6891842e2957d1a45575e971701e741374c95516c76ae41b44c9ed05f8"} Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.305388 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bddch" event={"ID":"42d53310-48e3-4c5b-846c-b5f2cf1f1877","Type":"ContainerDied","Data":"92e797671b5fb43e5b195019549f11be3a7df5caf7abcc8041f38eb41466a900"} Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.305426 4650 scope.go:117] "RemoveContainer" containerID="a3c3fa6891842e2957d1a45575e971701e741374c95516c76ae41b44c9ed05f8" Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.339197 4650 scope.go:117] "RemoveContainer" containerID="4a6d048e5b038bff7495169acf1dfcddd83fa81f886721d45dff3bfa9e02305d" Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.361255 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bddch"] Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.373663 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bddch"] Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.387190 4650 scope.go:117] "RemoveContainer" containerID="33a99130b20c6afc09667bb0e0d4bb2a866fbdfffa1153d9685077a36d8643a6" Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.417982 4650 scope.go:117] "RemoveContainer" containerID="a3c3fa6891842e2957d1a45575e971701e741374c95516c76ae41b44c9ed05f8" Feb 01 07:49:55 crc kubenswrapper[4650]: E0201 07:49:55.418533 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3c3fa6891842e2957d1a45575e971701e741374c95516c76ae41b44c9ed05f8\": container with ID starting with a3c3fa6891842e2957d1a45575e971701e741374c95516c76ae41b44c9ed05f8 not found: ID does not exist" containerID="a3c3fa6891842e2957d1a45575e971701e741374c95516c76ae41b44c9ed05f8" Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.418668 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3c3fa6891842e2957d1a45575e971701e741374c95516c76ae41b44c9ed05f8"} err="failed to get container status \"a3c3fa6891842e2957d1a45575e971701e741374c95516c76ae41b44c9ed05f8\": rpc error: code = NotFound desc = could not find container \"a3c3fa6891842e2957d1a45575e971701e741374c95516c76ae41b44c9ed05f8\": container with ID starting with a3c3fa6891842e2957d1a45575e971701e741374c95516c76ae41b44c9ed05f8 not found: ID does not exist" Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.418765 4650 scope.go:117] "RemoveContainer" containerID="4a6d048e5b038bff7495169acf1dfcddd83fa81f886721d45dff3bfa9e02305d" Feb 01 07:49:55 crc kubenswrapper[4650]: E0201 07:49:55.419277 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a6d048e5b038bff7495169acf1dfcddd83fa81f886721d45dff3bfa9e02305d\": container with ID starting with 4a6d048e5b038bff7495169acf1dfcddd83fa81f886721d45dff3bfa9e02305d not found: ID does not exist" containerID="4a6d048e5b038bff7495169acf1dfcddd83fa81f886721d45dff3bfa9e02305d" Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.419387 4650 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a6d048e5b038bff7495169acf1dfcddd83fa81f886721d45dff3bfa9e02305d"} err="failed to get container status \"4a6d048e5b038bff7495169acf1dfcddd83fa81f886721d45dff3bfa9e02305d\": rpc error: code = NotFound desc = could not find container \"4a6d048e5b038bff7495169acf1dfcddd83fa81f886721d45dff3bfa9e02305d\": container with ID starting with 4a6d048e5b038bff7495169acf1dfcddd83fa81f886721d45dff3bfa9e02305d not found: ID does not exist" Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.419470 4650 scope.go:117] "RemoveContainer" containerID="33a99130b20c6afc09667bb0e0d4bb2a866fbdfffa1153d9685077a36d8643a6" Feb 01 07:49:55 crc kubenswrapper[4650]: E0201 07:49:55.419885 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33a99130b20c6afc09667bb0e0d4bb2a866fbdfffa1153d9685077a36d8643a6\": container with ID starting with 33a99130b20c6afc09667bb0e0d4bb2a866fbdfffa1153d9685077a36d8643a6 not found: ID does not exist" containerID="33a99130b20c6afc09667bb0e0d4bb2a866fbdfffa1153d9685077a36d8643a6" Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.419950 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33a99130b20c6afc09667bb0e0d4bb2a866fbdfffa1153d9685077a36d8643a6"} err="failed to get container status \"33a99130b20c6afc09667bb0e0d4bb2a866fbdfffa1153d9685077a36d8643a6\": rpc error: code = NotFound desc = could not find container \"33a99130b20c6afc09667bb0e0d4bb2a866fbdfffa1153d9685077a36d8643a6\": container with ID starting with 33a99130b20c6afc09667bb0e0d4bb2a866fbdfffa1153d9685077a36d8643a6 not found: ID does not exist" Feb 01 07:49:55 crc kubenswrapper[4650]: I0201 07:49:55.977258 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42d53310-48e3-4c5b-846c-b5f2cf1f1877" path="/var/lib/kubelet/pods/42d53310-48e3-4c5b-846c-b5f2cf1f1877/volumes" Feb 01 07:49:57 crc kubenswrapper[4650]: I0201 07:49:57.819564 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:49:59 crc kubenswrapper[4650]: I0201 07:49:59.810391 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:49:59 crc kubenswrapper[4650]: I0201 07:49:59.965589 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:49:59 crc kubenswrapper[4650]: E0201 07:49:59.966987 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:50:00 crc kubenswrapper[4650]: I0201 07:50:00.805124 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" 
probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:50:00 crc kubenswrapper[4650]: I0201 07:50:00.966127 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:50:00 crc kubenswrapper[4650]: I0201 07:50:00.966813 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:50:00 crc kubenswrapper[4650]: I0201 07:50:00.967070 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:50:00 crc kubenswrapper[4650]: E0201 07:50:00.967735 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:50:03 crc kubenswrapper[4650]: I0201 07:50:03.811772 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:50:03 crc kubenswrapper[4650]: I0201 07:50:03.812282 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:50:03 crc kubenswrapper[4650]: I0201 07:50:03.813019 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 07:50:03 crc kubenswrapper[4650]: I0201 07:50:03.813054 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:50:03 crc kubenswrapper[4650]: I0201 07:50:03.813079 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" gracePeriod=30 Feb 01 07:50:03 crc kubenswrapper[4650]: I0201 07:50:03.821411 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:50:03 crc kubenswrapper[4650]: E0201 07:50:03.958520 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for 
\"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:50:04 crc kubenswrapper[4650]: I0201 07:50:04.413562 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" exitCode=0 Feb 01 07:50:04 crc kubenswrapper[4650]: I0201 07:50:04.413615 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b"} Feb 01 07:50:04 crc kubenswrapper[4650]: I0201 07:50:04.413673 4650 scope.go:117] "RemoveContainer" containerID="8c120d6cdeaaed503f341d0da56919763582ad818b728781ebd5ada12533feed" Feb 01 07:50:04 crc kubenswrapper[4650]: I0201 07:50:04.414708 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:50:04 crc kubenswrapper[4650]: I0201 07:50:04.414747 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:50:04 crc kubenswrapper[4650]: E0201 07:50:04.415260 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:50:13 crc kubenswrapper[4650]: I0201 07:50:13.967206 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:50:13 crc kubenswrapper[4650]: I0201 07:50:13.968140 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:50:13 crc kubenswrapper[4650]: I0201 07:50:13.968402 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:50:13 crc kubenswrapper[4650]: E0201 07:50:13.969176 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:50:14 crc kubenswrapper[4650]: I0201 07:50:14.964981 4650 scope.go:117] "RemoveContainer" 
containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:50:14 crc kubenswrapper[4650]: I0201 07:50:14.965443 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:50:14 crc kubenswrapper[4650]: I0201 07:50:14.965476 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:50:14 crc kubenswrapper[4650]: E0201 07:50:14.965563 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:50:14 crc kubenswrapper[4650]: E0201 07:50:14.965866 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:50:22 crc kubenswrapper[4650]: I0201 07:50:22.651119 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="be340d6125db32ca6fa300a2a4dbcacc27c7219c0bcf3969b185e0315c851561" exitCode=1 Feb 01 07:50:22 crc kubenswrapper[4650]: I0201 07:50:22.651207 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"be340d6125db32ca6fa300a2a4dbcacc27c7219c0bcf3969b185e0315c851561"} Feb 01 07:50:22 crc kubenswrapper[4650]: I0201 07:50:22.651970 4650 scope.go:117] "RemoveContainer" containerID="0833a80649ab7d4f2475b1080ca5c9f01d81430b7f44565b92dc4c126f3c0af6" Feb 01 07:50:22 crc kubenswrapper[4650]: I0201 07:50:22.652968 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:50:22 crc kubenswrapper[4650]: I0201 07:50:22.653042 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:50:22 crc kubenswrapper[4650]: I0201 07:50:22.653065 4650 scope.go:117] "RemoveContainer" containerID="be340d6125db32ca6fa300a2a4dbcacc27c7219c0bcf3969b185e0315c851561" Feb 01 07:50:22 crc kubenswrapper[4650]: I0201 07:50:22.653210 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:50:22 crc kubenswrapper[4650]: E0201 07:50:22.653837 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:50:25 crc kubenswrapper[4650]: I0201 07:50:25.087239 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-nkfbd"] Feb 01 07:50:25 crc kubenswrapper[4650]: I0201 07:50:25.097449 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-px9bz"] Feb 01 07:50:25 crc kubenswrapper[4650]: I0201 07:50:25.114176 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-f6d0-account-create-update-nhvh2"] Feb 01 07:50:25 crc kubenswrapper[4650]: I0201 07:50:25.123863 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-px9bz"] Feb 01 07:50:25 crc kubenswrapper[4650]: I0201 07:50:25.131318 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-nkfbd"] Feb 01 07:50:25 crc kubenswrapper[4650]: I0201 07:50:25.137751 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-f6d0-account-create-update-nhvh2"] Feb 01 07:50:25 crc kubenswrapper[4650]: I0201 07:50:25.257822 4650 scope.go:117] "RemoveContainer" containerID="2da597a32f0a46d324d9340fc57a17fe507ac7fa3133ce14d350b1ad1709332b" Feb 01 07:50:25 crc kubenswrapper[4650]: I0201 07:50:25.297891 4650 scope.go:117] "RemoveContainer" containerID="5c22ef4e8bf773054d60d296e03a78fe3cd47e9f78694c1d99fd5bd8b0eedefa" Feb 01 07:50:25 crc kubenswrapper[4650]: I0201 07:50:25.966152 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:50:25 crc kubenswrapper[4650]: E0201 07:50:25.966486 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:50:25 crc kubenswrapper[4650]: I0201 07:50:25.987720 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc47f442-6ef1-4710-8033-9a9367b45a24" path="/var/lib/kubelet/pods/cc47f442-6ef1-4710-8033-9a9367b45a24/volumes" Feb 01 07:50:25 crc kubenswrapper[4650]: I0201 07:50:25.990629 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfbfc491-edcf-4ced-88d5-68f3373f5aa7" path="/var/lib/kubelet/pods/dfbfc491-edcf-4ced-88d5-68f3373f5aa7/volumes" Feb 01 07:50:25 crc kubenswrapper[4650]: I0201 07:50:25.991850 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dff691a9-0cd8-42ec-9f8d-1fbe9429566b" path="/var/lib/kubelet/pods/dff691a9-0cd8-42ec-9f8d-1fbe9429566b/volumes" Feb 01 07:50:28 crc kubenswrapper[4650]: I0201 07:50:28.057390 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-1135-account-create-update-n5rhb"] Feb 01 07:50:28 crc 
kubenswrapper[4650]: I0201 07:50:28.067543 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-1135-account-create-update-n5rhb"] Feb 01 07:50:28 crc kubenswrapper[4650]: I0201 07:50:28.965914 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:50:28 crc kubenswrapper[4650]: I0201 07:50:28.965940 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:50:28 crc kubenswrapper[4650]: E0201 07:50:28.966147 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:50:29 crc kubenswrapper[4650]: I0201 07:50:29.986300 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c29250-327f-47ad-b068-f42861c819ab" path="/var/lib/kubelet/pods/49c29250-327f-47ad-b068-f42861c819ab/volumes" Feb 01 07:50:31 crc kubenswrapper[4650]: I0201 07:50:31.045947 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-24db-account-create-update-nngtm"] Feb 01 07:50:31 crc kubenswrapper[4650]: I0201 07:50:31.061561 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-24db-account-create-update-nngtm"] Feb 01 07:50:31 crc kubenswrapper[4650]: I0201 07:50:31.991423 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6b48882-4a16-4c93-8a4b-3118bea76c46" path="/var/lib/kubelet/pods/c6b48882-4a16-4c93-8a4b-3118bea76c46/volumes" Feb 01 07:50:32 crc kubenswrapper[4650]: I0201 07:50:32.046935 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-cl46t"] Feb 01 07:50:32 crc kubenswrapper[4650]: I0201 07:50:32.060473 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-cl46t"] Feb 01 07:50:33 crc kubenswrapper[4650]: I0201 07:50:33.979802 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a8a7b3a-cfd2-43de-9026-fb9511531544" path="/var/lib/kubelet/pods/6a8a7b3a-cfd2-43de-9026-fb9511531544/volumes" Feb 01 07:50:34 crc kubenswrapper[4650]: I0201 07:50:34.966918 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:50:34 crc kubenswrapper[4650]: I0201 07:50:34.967492 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:50:34 crc kubenswrapper[4650]: I0201 07:50:34.967745 4650 scope.go:117] "RemoveContainer" containerID="be340d6125db32ca6fa300a2a4dbcacc27c7219c0bcf3969b185e0315c851561" Feb 01 07:50:34 crc kubenswrapper[4650]: I0201 07:50:34.968143 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:50:34 crc kubenswrapper[4650]: E0201 07:50:34.969221 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:50:40 crc kubenswrapper[4650]: I0201 07:50:40.735981 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:50:40 crc kubenswrapper[4650]: E0201 07:50:40.736251 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:50:40 crc kubenswrapper[4650]: E0201 07:50:40.736709 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:52:42.736690784 +0000 UTC m=+1761.459789029 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:50:40 crc kubenswrapper[4650]: I0201 07:50:40.965202 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:50:40 crc kubenswrapper[4650]: E0201 07:50:40.965449 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:50:42 crc kubenswrapper[4650]: E0201 07:50:42.396141 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 07:50:42 crc kubenswrapper[4650]: I0201 07:50:42.954483 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:50:43 crc kubenswrapper[4650]: I0201 07:50:43.040530 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-fpjpx"] Feb 01 07:50:43 crc kubenswrapper[4650]: I0201 07:50:43.047941 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-fpjpx"] Feb 01 07:50:43 crc kubenswrapper[4650]: I0201 07:50:43.964611 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:50:43 crc kubenswrapper[4650]: I0201 07:50:43.964636 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:50:43 crc kubenswrapper[4650]: E0201 07:50:43.964853 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:50:43 crc kubenswrapper[4650]: I0201 07:50:43.975088 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97cad671-0078-4ca0-a66c-53b9d93adb4a" path="/var/lib/kubelet/pods/97cad671-0078-4ca0-a66c-53b9d93adb4a/volumes" Feb 01 07:50:46 crc kubenswrapper[4650]: I0201 07:50:46.965575 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:50:46 crc kubenswrapper[4650]: I0201 07:50:46.966129 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:50:46 crc kubenswrapper[4650]: I0201 07:50:46.966150 4650 scope.go:117] "RemoveContainer" containerID="be340d6125db32ca6fa300a2a4dbcacc27c7219c0bcf3969b185e0315c851561" Feb 01 07:50:46 crc kubenswrapper[4650]: I0201 07:50:46.966215 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:50:46 crc kubenswrapper[4650]: E0201 07:50:46.966531 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:50:51 crc kubenswrapper[4650]: I0201 07:50:51.970914 4650 scope.go:117] "RemoveContainer" 
containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:50:51 crc kubenswrapper[4650]: E0201 07:50:51.971524 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:50:56 crc kubenswrapper[4650]: I0201 07:50:56.965719 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:50:56 crc kubenswrapper[4650]: I0201 07:50:56.966391 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:50:56 crc kubenswrapper[4650]: E0201 07:50:56.966753 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:50:58 crc kubenswrapper[4650]: I0201 07:50:58.967522 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:50:58 crc kubenswrapper[4650]: I0201 07:50:58.967947 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:50:58 crc kubenswrapper[4650]: I0201 07:50:58.967992 4650 scope.go:117] "RemoveContainer" containerID="be340d6125db32ca6fa300a2a4dbcacc27c7219c0bcf3969b185e0315c851561" Feb 01 07:50:58 crc kubenswrapper[4650]: I0201 07:50:58.968139 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:50:58 crc kubenswrapper[4650]: E0201 07:50:58.968790 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:51:03 crc kubenswrapper[4650]: I0201 07:51:03.084001 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6caf-account-create-update-lwpqh"] Feb 01 07:51:03 crc kubenswrapper[4650]: 
I0201 07:51:03.107962 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-a6d9-account-create-update-x2cpx"] Feb 01 07:51:03 crc kubenswrapper[4650]: I0201 07:51:03.122298 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-6b89b"] Feb 01 07:51:03 crc kubenswrapper[4650]: I0201 07:51:03.134183 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-zc2xb"] Feb 01 07:51:03 crc kubenswrapper[4650]: I0201 07:51:03.145378 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-a6d9-account-create-update-x2cpx"] Feb 01 07:51:03 crc kubenswrapper[4650]: I0201 07:51:03.153216 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-zc2xb"] Feb 01 07:51:03 crc kubenswrapper[4650]: I0201 07:51:03.161054 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-6caf-account-create-update-lwpqh"] Feb 01 07:51:03 crc kubenswrapper[4650]: I0201 07:51:03.167701 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-6b89b"] Feb 01 07:51:03 crc kubenswrapper[4650]: I0201 07:51:03.985597 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44937f93-aef1-4223-ad7e-5d05832d2f4b" path="/var/lib/kubelet/pods/44937f93-aef1-4223-ad7e-5d05832d2f4b/volumes" Feb 01 07:51:03 crc kubenswrapper[4650]: I0201 07:51:03.987269 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8368ba08-f07f-4082-bfd0-e72e1a38d7a8" path="/var/lib/kubelet/pods/8368ba08-f07f-4082-bfd0-e72e1a38d7a8/volumes" Feb 01 07:51:03 crc kubenswrapper[4650]: I0201 07:51:03.988657 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92592241-70ef-42cc-b3b7-3a85bcdba8a8" path="/var/lib/kubelet/pods/92592241-70ef-42cc-b3b7-3a85bcdba8a8/volumes" Feb 01 07:51:03 crc kubenswrapper[4650]: I0201 07:51:03.990136 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8de9a37-3519-4804-b3e8-197bea437afe" path="/var/lib/kubelet/pods/b8de9a37-3519-4804-b3e8-197bea437afe/volumes" Feb 01 07:51:04 crc kubenswrapper[4650]: I0201 07:51:04.053872 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-3173-account-create-update-l5lzr"] Feb 01 07:51:04 crc kubenswrapper[4650]: I0201 07:51:04.061087 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-fllvl"] Feb 01 07:51:04 crc kubenswrapper[4650]: I0201 07:51:04.074660 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-3173-account-create-update-l5lzr"] Feb 01 07:51:04 crc kubenswrapper[4650]: I0201 07:51:04.085927 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-fllvl"] Feb 01 07:51:05 crc kubenswrapper[4650]: I0201 07:51:05.982621 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60f5edb2-23c7-4720-a7f0-8a635e39cd03" path="/var/lib/kubelet/pods/60f5edb2-23c7-4720-a7f0-8a635e39cd03/volumes" Feb 01 07:51:05 crc kubenswrapper[4650]: I0201 07:51:05.983663 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aee86276-99b2-44ef-ae5d-6072f34ffe58" path="/var/lib/kubelet/pods/aee86276-99b2-44ef-ae5d-6072f34ffe58/volumes" Feb 01 07:51:06 crc kubenswrapper[4650]: I0201 07:51:06.965217 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:51:06 crc kubenswrapper[4650]: E0201 07:51:06.966276 4650 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:51:07 crc kubenswrapper[4650]: I0201 07:51:07.040376 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-bkqlt"] Feb 01 07:51:07 crc kubenswrapper[4650]: I0201 07:51:07.051998 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-bkqlt"] Feb 01 07:51:07 crc kubenswrapper[4650]: I0201 07:51:07.966059 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:51:07 crc kubenswrapper[4650]: I0201 07:51:07.966087 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:51:07 crc kubenswrapper[4650]: E0201 07:51:07.966457 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:51:07 crc kubenswrapper[4650]: I0201 07:51:07.981625 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b01aeb4f-ec32-444e-b714-6ab54c79bad3" path="/var/lib/kubelet/pods/b01aeb4f-ec32-444e-b714-6ab54c79bad3/volumes" Feb 01 07:51:12 crc kubenswrapper[4650]: I0201 07:51:12.966581 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:51:12 crc kubenswrapper[4650]: I0201 07:51:12.967172 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:51:12 crc kubenswrapper[4650]: I0201 07:51:12.967196 4650 scope.go:117] "RemoveContainer" containerID="be340d6125db32ca6fa300a2a4dbcacc27c7219c0bcf3969b185e0315c851561" Feb 01 07:51:12 crc kubenswrapper[4650]: I0201 07:51:12.967250 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:51:13 crc kubenswrapper[4650]: I0201 07:51:13.073956 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-sz8cr"] Feb 01 07:51:13 crc kubenswrapper[4650]: I0201 07:51:13.091938 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-sz8cr"] Feb 01 07:51:13 crc kubenswrapper[4650]: E0201 07:51:13.183204 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:51:13 crc kubenswrapper[4650]: I0201 07:51:13.296547 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"8898426fa76236c2e570630635ac922875a39a37f68525ffa67c42004070d22e"} Feb 01 07:51:13 crc kubenswrapper[4650]: I0201 07:51:13.297300 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:51:13 crc kubenswrapper[4650]: I0201 07:51:13.297360 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:51:13 crc kubenswrapper[4650]: I0201 07:51:13.297448 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:51:13 crc kubenswrapper[4650]: E0201 07:51:13.297708 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:51:13 crc kubenswrapper[4650]: I0201 07:51:13.981005 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3127db22-5d48-4d3e-bd3e-806a06e6cad8" path="/var/lib/kubelet/pods/3127db22-5d48-4d3e-bd3e-806a06e6cad8/volumes" Feb 01 07:51:21 crc kubenswrapper[4650]: I0201 07:51:21.978342 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:51:21 crc kubenswrapper[4650]: E0201 07:51:21.979256 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:51:22 crc kubenswrapper[4650]: I0201 07:51:22.965781 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:51:22 crc kubenswrapper[4650]: I0201 07:51:22.965823 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:51:22 crc kubenswrapper[4650]: E0201 07:51:22.966242 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.436631 4650 scope.go:117] "RemoveContainer" containerID="661db6a99c171dffcf7b9e23a488449dd64a4095b44ea3dd32fb4f78c5362a0a" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.470115 4650 scope.go:117] "RemoveContainer" containerID="a4c3419430b19047f133d5cd68eeaeef7602934e5639e9f02510aaec85580f3f" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.546230 4650 scope.go:117] "RemoveContainer" containerID="da46aa12b1173720990b7676be01c57190bdd6a845a02438538595c9c31b4a09" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.594783 4650 scope.go:117] "RemoveContainer" containerID="afb6f7e7a6361166a79d5c9c9c11c3e7a85925839b9e3a25d3c34479c55be767" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.658769 4650 scope.go:117] "RemoveContainer" containerID="47c77622a6e374d174339b58a313b096f458fa48d3f755f722f33343b2680dbc" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.708322 4650 scope.go:117] "RemoveContainer" containerID="8d5e5dbe5759c31bf64982ba526d498bc73d1f9f0f7eeff69ca59b13ca7d8701" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.746348 4650 scope.go:117] "RemoveContainer" containerID="bf03f0e1b53e1196ecb8395cf75dd74193c0e458b3322b2002b77202a701682f" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.772386 4650 scope.go:117] "RemoveContainer" containerID="32d81a88d968689fba06ddcdc747ee142ebd543892b4554577feb395a000e4db" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.795403 4650 scope.go:117] "RemoveContainer" containerID="d325c2535cb35a0256074fa0ec2c38aceb9baae973c61da8d6d5fe1b1c585c06" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.831510 4650 scope.go:117] "RemoveContainer" containerID="1cd89493603a6be879e8b8e1b224d9ecffd87c85fb286fb9aee3bfc571e25cfd" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.847838 4650 scope.go:117] "RemoveContainer" containerID="d70d4e097467770c1b69cc43e7089ed32e2a33e86124026db3fd2fbec8555db5" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.875736 4650 scope.go:117] "RemoveContainer" containerID="9224ae6d3e41fb31799d33c0395616bd510a4ea25b4d166b85998ef53f01e8b4" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.900242 4650 scope.go:117] "RemoveContainer" containerID="c6c329464cf6866eaac072a3bbd926192fefe4f51643c556b4c28a270c2ef9a2" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.922528 4650 scope.go:117] "RemoveContainer" containerID="a8d34655beb163077dd0a62ab6ed81fe4ff5c3fbc4ab53e6e25ef443e61cacbf" Feb 01 07:51:25 crc kubenswrapper[4650]: I0201 07:51:25.945666 4650 scope.go:117] "RemoveContainer" containerID="b5eb363e96674975163c0fdcf1abb1291e2df1a43bbc523300a5ee0145efa2d7" Feb 01 07:51:26 crc kubenswrapper[4650]: I0201 07:51:26.965518 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:51:26 crc kubenswrapper[4650]: I0201 07:51:26.965935 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:51:26 crc kubenswrapper[4650]: I0201 07:51:26.966052 4650 scope.go:117] "RemoveContainer" 
containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:51:26 crc kubenswrapper[4650]: E0201 07:51:26.966489 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:51:33 crc kubenswrapper[4650]: I0201 07:51:33.966405 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:51:33 crc kubenswrapper[4650]: I0201 07:51:33.967274 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:51:33 crc kubenswrapper[4650]: E0201 07:51:33.967863 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:51:36 crc kubenswrapper[4650]: I0201 07:51:36.965102 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:51:36 crc kubenswrapper[4650]: E0201 07:51:36.965669 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:51:41 crc kubenswrapper[4650]: I0201 07:51:41.981505 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:51:41 crc kubenswrapper[4650]: I0201 07:51:41.982276 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:51:41 crc kubenswrapper[4650]: I0201 07:51:41.982434 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:51:42 crc kubenswrapper[4650]: I0201 07:51:42.602419 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9"} Feb 01 07:51:42 crc kubenswrapper[4650]: I0201 07:51:42.602769 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754"} Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.640972 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9"} Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.640996 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" exitCode=1 Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.644753 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" exitCode=1 Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.644880 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="f3ddd55de1aeddf7a04dcf8fad21d6821622eb50041c19f6cc16fdd930faf590" exitCode=1 Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.644968 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" exitCode=1 Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.641714 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.645697 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.645810 4650 scope.go:117] "RemoveContainer" containerID="f3ddd55de1aeddf7a04dcf8fad21d6821622eb50041c19f6cc16fdd930faf590" Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.645830 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.646320 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754"} Feb 01 07:51:43 crc kubenswrapper[4650]: E0201 07:51:43.646396 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" 
podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.646413 4650 scope.go:117] "RemoveContainer" containerID="378cfc7574e20c98e537ccd775e8bc89e77c988ef0e740218da30df4d09b8443" Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.646427 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"f3ddd55de1aeddf7a04dcf8fad21d6821622eb50041c19f6cc16fdd930faf590"} Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.646539 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950"} Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.736822 4650 scope.go:117] "RemoveContainer" containerID="362254a59777837721a9c0e88591767bc97cafc591ba841c173f38ac5e5fcffc" Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.791098 4650 scope.go:117] "RemoveContainer" containerID="e6dc364e85738df90c32cbd434759e3f0e7d1ab1e42c31023453e8704d13f08b" Feb 01 07:51:43 crc kubenswrapper[4650]: I0201 07:51:43.847305 4650 scope.go:117] "RemoveContainer" containerID="dab7754d84cb87e204e550d6f85041ebfdd146c1054f1bc3fd15377bbc7bc287" Feb 01 07:51:44 crc kubenswrapper[4650]: I0201 07:51:44.665820 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:51:44 crc kubenswrapper[4650]: I0201 07:51:44.665886 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:51:44 crc kubenswrapper[4650]: I0201 07:51:44.665959 4650 scope.go:117] "RemoveContainer" containerID="f3ddd55de1aeddf7a04dcf8fad21d6821622eb50041c19f6cc16fdd930faf590" Feb 01 07:51:44 crc kubenswrapper[4650]: I0201 07:51:44.665965 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:51:44 crc kubenswrapper[4650]: E0201 07:51:44.666413 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:51:45 crc kubenswrapper[4650]: I0201 07:51:45.675373 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:51:45 crc kubenswrapper[4650]: I0201 07:51:45.675761 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:51:45 crc kubenswrapper[4650]: I0201 07:51:45.675858 4650 
scope.go:117] "RemoveContainer" containerID="f3ddd55de1aeddf7a04dcf8fad21d6821622eb50041c19f6cc16fdd930faf590" Feb 01 07:51:45 crc kubenswrapper[4650]: I0201 07:51:45.675867 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:51:45 crc kubenswrapper[4650]: E0201 07:51:45.676336 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:51:45 crc kubenswrapper[4650]: I0201 07:51:45.965337 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:51:45 crc kubenswrapper[4650]: I0201 07:51:45.965715 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:51:45 crc kubenswrapper[4650]: E0201 07:51:45.966105 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:51:48 crc kubenswrapper[4650]: I0201 07:51:48.964663 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:51:48 crc kubenswrapper[4650]: E0201 07:51:48.965206 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:51:52 crc kubenswrapper[4650]: I0201 07:51:52.042579 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-5gv78"] Feb 01 07:51:52 crc kubenswrapper[4650]: I0201 07:51:52.053880 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-5gv78"] Feb 01 07:51:53 crc kubenswrapper[4650]: I0201 07:51:53.987902 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16da8114-b11c-449a-8cf7-17c1980cdcf7" 
path="/var/lib/kubelet/pods/16da8114-b11c-449a-8cf7-17c1980cdcf7/volumes" Feb 01 07:51:55 crc kubenswrapper[4650]: I0201 07:51:55.966690 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:51:55 crc kubenswrapper[4650]: I0201 07:51:55.967624 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:51:55 crc kubenswrapper[4650]: I0201 07:51:55.967815 4650 scope.go:117] "RemoveContainer" containerID="f3ddd55de1aeddf7a04dcf8fad21d6821622eb50041c19f6cc16fdd930faf590" Feb 01 07:51:55 crc kubenswrapper[4650]: I0201 07:51:55.967836 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:51:55 crc kubenswrapper[4650]: E0201 07:51:55.968649 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:51:59 crc kubenswrapper[4650]: I0201 07:51:59.966306 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:51:59 crc kubenswrapper[4650]: I0201 07:51:59.966743 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:51:59 crc kubenswrapper[4650]: E0201 07:51:59.967281 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:52:02 crc kubenswrapper[4650]: I0201 07:52:02.965932 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:52:02 crc kubenswrapper[4650]: E0201 07:52:02.968279 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:52:04 crc kubenswrapper[4650]: I0201 
07:52:04.053098 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-d4zk4"] Feb 01 07:52:04 crc kubenswrapper[4650]: I0201 07:52:04.064699 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-d4zk4"] Feb 01 07:52:05 crc kubenswrapper[4650]: I0201 07:52:05.985362 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d6a29ee-be36-4454-bf92-6dfffd45687b" path="/var/lib/kubelet/pods/9d6a29ee-be36-4454-bf92-6dfffd45687b/volumes" Feb 01 07:52:07 crc kubenswrapper[4650]: I0201 07:52:07.966775 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:52:07 crc kubenswrapper[4650]: I0201 07:52:07.967554 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:52:07 crc kubenswrapper[4650]: I0201 07:52:07.967706 4650 scope.go:117] "RemoveContainer" containerID="f3ddd55de1aeddf7a04dcf8fad21d6821622eb50041c19f6cc16fdd930faf590" Feb 01 07:52:07 crc kubenswrapper[4650]: I0201 07:52:07.967719 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:52:08 crc kubenswrapper[4650]: E0201 07:52:08.212283 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:52:08 crc kubenswrapper[4650]: I0201 07:52:08.940925 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"4b33320c9f35a50c528cb00327a30a15dbb184f32e6aea5d10e70ce7875241d7"} Feb 01 07:52:08 crc kubenswrapper[4650]: I0201 07:52:08.942208 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:52:08 crc kubenswrapper[4650]: I0201 07:52:08.942294 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:52:08 crc kubenswrapper[4650]: I0201 07:52:08.942418 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:52:08 crc kubenswrapper[4650]: E0201 07:52:08.942950 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:52:10 crc kubenswrapper[4650]: I0201 07:52:10.042545 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-z4nfj"] Feb 01 07:52:10 crc kubenswrapper[4650]: I0201 07:52:10.060410 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-z4nfj"] Feb 01 07:52:10 crc kubenswrapper[4650]: I0201 07:52:10.964956 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:52:10 crc kubenswrapper[4650]: I0201 07:52:10.965317 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:52:10 crc kubenswrapper[4650]: E0201 07:52:10.965522 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:52:11 crc kubenswrapper[4650]: I0201 07:52:11.984178 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0b99236-33b9-4191-8139-8afbda8a3329" path="/var/lib/kubelet/pods/b0b99236-33b9-4191-8139-8afbda8a3329/volumes" Feb 01 07:52:14 crc kubenswrapper[4650]: I0201 07:52:14.965705 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:52:14 crc kubenswrapper[4650]: E0201 07:52:14.966862 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:52:21 crc kubenswrapper[4650]: I0201 07:52:21.965990 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:52:21 crc kubenswrapper[4650]: I0201 07:52:21.966660 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:52:21 crc kubenswrapper[4650]: E0201 07:52:21.967171 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:52:22 crc kubenswrapper[4650]: I0201 07:52:22.966353 4650 scope.go:117] "RemoveContainer" 
containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:52:22 crc kubenswrapper[4650]: I0201 07:52:22.966442 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:52:22 crc kubenswrapper[4650]: I0201 07:52:22.966565 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:52:22 crc kubenswrapper[4650]: E0201 07:52:22.966917 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:52:23 crc kubenswrapper[4650]: I0201 07:52:23.042921 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-kzjnq"] Feb 01 07:52:23 crc kubenswrapper[4650]: I0201 07:52:23.059812 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-x99cv"] Feb 01 07:52:23 crc kubenswrapper[4650]: I0201 07:52:23.066904 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-x99cv"] Feb 01 07:52:23 crc kubenswrapper[4650]: I0201 07:52:23.074334 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-kzjnq"] Feb 01 07:52:23 crc kubenswrapper[4650]: I0201 07:52:23.980089 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00154668-79cc-4c4d-81f9-e7975168f700" path="/var/lib/kubelet/pods/00154668-79cc-4c4d-81f9-e7975168f700/volumes" Feb 01 07:52:23 crc kubenswrapper[4650]: I0201 07:52:23.982915 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2208b1dc-dbac-498a-a760-21257b722e80" path="/var/lib/kubelet/pods/2208b1dc-dbac-498a-a760-21257b722e80/volumes" Feb 01 07:52:26 crc kubenswrapper[4650]: I0201 07:52:26.276215 4650 scope.go:117] "RemoveContainer" containerID="23a50bfc95a722d92ac978351b6831fe4a48bd989557bce3777ce7609251cbe2" Feb 01 07:52:26 crc kubenswrapper[4650]: I0201 07:52:26.329361 4650 scope.go:117] "RemoveContainer" containerID="3f593a11994ea5a5c968f9dfb5d93197e2f4bbb0e46afaf85e0d37a73181b3d5" Feb 01 07:52:26 crc kubenswrapper[4650]: I0201 07:52:26.413340 4650 scope.go:117] "RemoveContainer" containerID="99550a12eafed3327c442223efb94cccfd671910766cf2c64f63138a238ccfb4" Feb 01 07:52:26 crc kubenswrapper[4650]: I0201 07:52:26.470284 4650 scope.go:117] "RemoveContainer" containerID="56ccf51c02b67d917b039a07a1d5a5a8ac5d69569a9595db4ffe6c390d10bd0e" Feb 01 07:52:26 crc kubenswrapper[4650]: I0201 07:52:26.524488 4650 scope.go:117] "RemoveContainer" containerID="6c4f59f47f499c833fd050a42711bc0cd8616de68ce9a799bc57cf737b8ca09b" Feb 01 07:52:29 crc kubenswrapper[4650]: I0201 07:52:29.965245 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:52:29 crc kubenswrapper[4650]: E0201 07:52:29.965572 4650 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:52:34 crc kubenswrapper[4650]: I0201 07:52:34.965860 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:52:34 crc kubenswrapper[4650]: I0201 07:52:34.966351 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:52:34 crc kubenswrapper[4650]: I0201 07:52:34.966385 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:52:34 crc kubenswrapper[4650]: I0201 07:52:34.966461 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:52:34 crc kubenswrapper[4650]: I0201 07:52:34.966571 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:52:34 crc kubenswrapper[4650]: E0201 07:52:34.966828 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:52:34 crc kubenswrapper[4650]: E0201 07:52:34.968281 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:52:41 crc kubenswrapper[4650]: I0201 07:52:41.984207 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:52:41 crc kubenswrapper[4650]: E0201 07:52:41.984929 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:52:42 crc kubenswrapper[4650]: I0201 07:52:42.822314 4650 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:52:42 crc kubenswrapper[4650]: E0201 07:52:42.822475 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:52:42 crc kubenswrapper[4650]: E0201 07:52:42.822556 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:54:44.822535331 +0000 UTC m=+1883.545633586 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:52:45 crc kubenswrapper[4650]: E0201 07:52:45.956112 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 07:52:45 crc kubenswrapper[4650]: I0201 07:52:45.965783 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:52:45 crc kubenswrapper[4650]: I0201 07:52:45.965869 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:52:45 crc kubenswrapper[4650]: I0201 07:52:45.965973 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:52:45 crc kubenswrapper[4650]: E0201 07:52:45.966481 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:52:46 crc kubenswrapper[4650]: I0201 07:52:46.320011 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:52:49 crc kubenswrapper[4650]: I0201 07:52:49.970128 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:52:49 crc kubenswrapper[4650]: I0201 07:52:49.970602 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:52:49 crc kubenswrapper[4650]: E0201 07:52:49.970934 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:52:56 crc kubenswrapper[4650]: I0201 07:52:56.965371 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:52:56 crc kubenswrapper[4650]: I0201 07:52:56.966078 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:52:56 crc kubenswrapper[4650]: I0201 07:52:56.966257 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:52:56 crc kubenswrapper[4650]: I0201 07:52:56.966444 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:52:56 crc kubenswrapper[4650]: E0201 07:52:56.967266 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:52:56 crc kubenswrapper[4650]: E0201 07:52:56.967799 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:53:04 crc kubenswrapper[4650]: I0201 07:53:04.965948 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:53:04 crc kubenswrapper[4650]: I0201 07:53:04.966473 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:53:04 crc kubenswrapper[4650]: E0201 07:53:04.966775 4650 
pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:53:10 crc kubenswrapper[4650]: I0201 07:53:10.966899 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:53:10 crc kubenswrapper[4650]: I0201 07:53:10.970437 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:53:10 crc kubenswrapper[4650]: I0201 07:53:10.970636 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:53:10 crc kubenswrapper[4650]: E0201 07:53:10.971296 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:53:11 crc kubenswrapper[4650]: I0201 07:53:11.979430 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:53:11 crc kubenswrapper[4650]: E0201 07:53:11.980614 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:53:19 crc kubenswrapper[4650]: I0201 07:53:19.966712 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:53:19 crc kubenswrapper[4650]: I0201 07:53:19.967371 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:53:19 crc kubenswrapper[4650]: E0201 07:53:19.967854 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" 
pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:53:22 crc kubenswrapper[4650]: I0201 07:53:22.966953 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:53:22 crc kubenswrapper[4650]: I0201 07:53:22.967656 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:53:22 crc kubenswrapper[4650]: I0201 07:53:22.967837 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:53:22 crc kubenswrapper[4650]: E0201 07:53:22.968484 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:53:24 crc kubenswrapper[4650]: I0201 07:53:24.718370 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="8898426fa76236c2e570630635ac922875a39a37f68525ffa67c42004070d22e" exitCode=1 Feb 01 07:53:24 crc kubenswrapper[4650]: I0201 07:53:24.718435 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"8898426fa76236c2e570630635ac922875a39a37f68525ffa67c42004070d22e"} Feb 01 07:53:24 crc kubenswrapper[4650]: I0201 07:53:24.718928 4650 scope.go:117] "RemoveContainer" containerID="be340d6125db32ca6fa300a2a4dbcacc27c7219c0bcf3969b185e0315c851561" Feb 01 07:53:24 crc kubenswrapper[4650]: I0201 07:53:24.720306 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:53:24 crc kubenswrapper[4650]: I0201 07:53:24.720413 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:53:24 crc kubenswrapper[4650]: I0201 07:53:24.720456 4650 scope.go:117] "RemoveContainer" containerID="8898426fa76236c2e570630635ac922875a39a37f68525ffa67c42004070d22e" Feb 01 07:53:24 crc kubenswrapper[4650]: I0201 07:53:24.720614 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:53:24 crc kubenswrapper[4650]: E0201 07:53:24.722740 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: 
\"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:53:24 crc kubenswrapper[4650]: I0201 07:53:24.965800 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:53:24 crc kubenswrapper[4650]: E0201 07:53:24.966164 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 07:53:27 crc kubenswrapper[4650]: I0201 07:53:27.069775 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-85ntk"] Feb 01 07:53:27 crc kubenswrapper[4650]: I0201 07:53:27.082922 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-86k28"] Feb 01 07:53:27 crc kubenswrapper[4650]: I0201 07:53:27.093804 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-9wwvn"] Feb 01 07:53:27 crc kubenswrapper[4650]: I0201 07:53:27.104643 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-86k28"] Feb 01 07:53:27 crc kubenswrapper[4650]: I0201 07:53:27.111828 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-d87a-account-create-update-gd2x9"] Feb 01 07:53:27 crc kubenswrapper[4650]: I0201 07:53:27.117907 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-85ntk"] Feb 01 07:53:27 crc kubenswrapper[4650]: I0201 07:53:27.124635 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-9wwvn"] Feb 01 07:53:27 crc kubenswrapper[4650]: I0201 07:53:27.129676 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-d87a-account-create-update-gd2x9"] Feb 01 07:53:27 crc kubenswrapper[4650]: I0201 07:53:27.983363 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f2aa3a7-ab48-4686-b15a-4333b52302a2" path="/var/lib/kubelet/pods/3f2aa3a7-ab48-4686-b15a-4333b52302a2/volumes" Feb 01 07:53:27 crc kubenswrapper[4650]: I0201 07:53:27.985175 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b50b989-57dd-4a03-99ad-c46a180a3136" path="/var/lib/kubelet/pods/4b50b989-57dd-4a03-99ad-c46a180a3136/volumes" Feb 01 07:53:27 crc kubenswrapper[4650]: I0201 07:53:27.986418 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c7dfd53-9a47-402b-951c-e785181e81a2" path="/var/lib/kubelet/pods/6c7dfd53-9a47-402b-951c-e785181e81a2/volumes" Feb 01 07:53:27 crc kubenswrapper[4650]: I0201 07:53:27.987608 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6d6109a-7337-4d9b-bb82-b0f778d843c7" path="/var/lib/kubelet/pods/a6d6109a-7337-4d9b-bb82-b0f778d843c7/volumes" Feb 01 07:53:28 crc kubenswrapper[4650]: I0201 07:53:28.061163 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell1-b72f-account-create-update-29vrc"] Feb 01 07:53:28 crc kubenswrapper[4650]: I0201 07:53:28.071831 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-b72f-account-create-update-29vrc"] Feb 01 07:53:28 crc kubenswrapper[4650]: I0201 07:53:28.082551 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-f8ab-account-create-update-htkmw"] Feb 01 07:53:28 crc kubenswrapper[4650]: I0201 07:53:28.088961 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-f8ab-account-create-update-htkmw"] Feb 01 07:53:29 crc kubenswrapper[4650]: I0201 07:53:29.980231 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="676d06ca-f3de-4ce7-b782-0588cc433361" path="/var/lib/kubelet/pods/676d06ca-f3de-4ce7-b782-0588cc433361/volumes" Feb 01 07:53:29 crc kubenswrapper[4650]: I0201 07:53:29.981420 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3803000-bb1d-4d78-a52c-a754d805449b" path="/var/lib/kubelet/pods/d3803000-bb1d-4d78-a52c-a754d805449b/volumes" Feb 01 07:53:33 crc kubenswrapper[4650]: I0201 07:53:33.965774 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:53:33 crc kubenswrapper[4650]: I0201 07:53:33.967821 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:53:33 crc kubenswrapper[4650]: E0201 07:53:33.968573 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:53:37 crc kubenswrapper[4650]: I0201 07:53:37.967103 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:53:38 crc kubenswrapper[4650]: I0201 07:53:38.876860 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"a1f111be91cc1e9fcafc3706031d6b3031dfcc3555d9fa4700a7b38ad3a07c2d"} Feb 01 07:53:39 crc kubenswrapper[4650]: I0201 07:53:39.966376 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:53:39 crc kubenswrapper[4650]: I0201 07:53:39.966977 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:53:39 crc kubenswrapper[4650]: I0201 07:53:39.967052 4650 scope.go:117] "RemoveContainer" containerID="8898426fa76236c2e570630635ac922875a39a37f68525ffa67c42004070d22e" Feb 01 07:53:39 crc kubenswrapper[4650]: I0201 07:53:39.967184 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:53:39 crc kubenswrapper[4650]: E0201 07:53:39.967986 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:53:46 crc kubenswrapper[4650]: I0201 07:53:46.967526 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:53:46 crc kubenswrapper[4650]: I0201 07:53:46.968194 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:53:47 crc kubenswrapper[4650]: E0201 07:53:47.211340 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:53:47 crc kubenswrapper[4650]: I0201 07:53:47.989365 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a"} Feb 01 07:53:47 crc kubenswrapper[4650]: I0201 07:53:47.989789 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:53:47 crc kubenswrapper[4650]: I0201 07:53:47.991015 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:53:47 crc kubenswrapper[4650]: E0201 07:53:47.992500 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:53:49 crc kubenswrapper[4650]: I0201 07:53:49.004174 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" exitCode=1 Feb 01 07:53:49 crc kubenswrapper[4650]: I0201 07:53:49.004298 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a"} Feb 01 07:53:49 crc kubenswrapper[4650]: I0201 07:53:49.004617 4650 scope.go:117] "RemoveContainer" containerID="f39abfb443c22d54b2664bf44e749d61039b5eaa28af32bc00591f38e0ccb871" Feb 01 07:53:49 crc kubenswrapper[4650]: I0201 07:53:49.006221 4650 scope.go:117] 
"RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:53:49 crc kubenswrapper[4650]: I0201 07:53:49.006320 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:53:49 crc kubenswrapper[4650]: E0201 07:53:49.006935 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:53:50 crc kubenswrapper[4650]: I0201 07:53:50.018377 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:53:50 crc kubenswrapper[4650]: I0201 07:53:50.019455 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:53:50 crc kubenswrapper[4650]: E0201 07:53:50.019914 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:53:50 crc kubenswrapper[4650]: I0201 07:53:50.966301 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:53:50 crc kubenswrapper[4650]: I0201 07:53:50.966866 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:53:50 crc kubenswrapper[4650]: I0201 07:53:50.966931 4650 scope.go:117] "RemoveContainer" containerID="8898426fa76236c2e570630635ac922875a39a37f68525ffa67c42004070d22e" Feb 01 07:53:50 crc kubenswrapper[4650]: I0201 07:53:50.967179 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:53:50 crc kubenswrapper[4650]: E0201 07:53:50.968020 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:53:51 crc kubenswrapper[4650]: I0201 07:53:51.800592 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:53:51 crc kubenswrapper[4650]: I0201 07:53:51.801465 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:53:51 crc kubenswrapper[4650]: I0201 07:53:51.801481 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:53:51 crc kubenswrapper[4650]: E0201 07:53:51.801857 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:53:56 crc kubenswrapper[4650]: I0201 07:53:56.053944 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9hm6l"] Feb 01 07:53:56 crc kubenswrapper[4650]: I0201 07:53:56.069453 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9hm6l"] Feb 01 07:53:57 crc kubenswrapper[4650]: I0201 07:53:57.981676 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="364e7c65-e9d5-4a41-b87b-62b8da17e636" path="/var/lib/kubelet/pods/364e7c65-e9d5-4a41-b87b-62b8da17e636/volumes" Feb 01 07:54:03 crc kubenswrapper[4650]: I0201 07:54:03.965917 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:54:03 crc kubenswrapper[4650]: I0201 07:54:03.966648 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:54:03 crc kubenswrapper[4650]: E0201 07:54:03.967165 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:54:04 crc kubenswrapper[4650]: I0201 07:54:04.966852 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:54:04 crc kubenswrapper[4650]: I0201 07:54:04.966997 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:54:04 crc kubenswrapper[4650]: I0201 07:54:04.967075 4650 scope.go:117] "RemoveContainer" containerID="8898426fa76236c2e570630635ac922875a39a37f68525ffa67c42004070d22e" Feb 01 07:54:04 crc kubenswrapper[4650]: I0201 07:54:04.967208 4650 scope.go:117] 
"RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:54:04 crc kubenswrapper[4650]: E0201 07:54:04.967870 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:54:16 crc kubenswrapper[4650]: I0201 07:54:16.966670 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:54:16 crc kubenswrapper[4650]: I0201 07:54:16.968257 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:54:16 crc kubenswrapper[4650]: I0201 07:54:16.968319 4650 scope.go:117] "RemoveContainer" containerID="8898426fa76236c2e570630635ac922875a39a37f68525ffa67c42004070d22e" Feb 01 07:54:16 crc kubenswrapper[4650]: I0201 07:54:16.968471 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:54:16 crc kubenswrapper[4650]: E0201 07:54:16.969186 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:54:17 crc kubenswrapper[4650]: I0201 07:54:17.966844 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:54:17 crc kubenswrapper[4650]: I0201 07:54:17.967204 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:54:17 crc kubenswrapper[4650]: E0201 07:54:17.967653 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd 
pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:54:19 crc kubenswrapper[4650]: I0201 07:54:19.060860 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-kd98m"] Feb 01 07:54:19 crc kubenswrapper[4650]: I0201 07:54:19.069390 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-kd98m"] Feb 01 07:54:19 crc kubenswrapper[4650]: I0201 07:54:19.978277 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fabe1af1-e17e-4cb8-9b5f-0def0d4ff277" path="/var/lib/kubelet/pods/fabe1af1-e17e-4cb8-9b5f-0def0d4ff277/volumes" Feb 01 07:54:21 crc kubenswrapper[4650]: I0201 07:54:21.046169 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-jzxjl"] Feb 01 07:54:21 crc kubenswrapper[4650]: I0201 07:54:21.055120 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-jzxjl"] Feb 01 07:54:22 crc kubenswrapper[4650]: I0201 07:54:22.014561 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="471ec131-07c2-4fd8-a63a-e36c42859d92" path="/var/lib/kubelet/pods/471ec131-07c2-4fd8-a63a-e36c42859d92/volumes" Feb 01 07:54:26 crc kubenswrapper[4650]: I0201 07:54:26.667597 4650 scope.go:117] "RemoveContainer" containerID="be31af16eaf0b1e0f020f9f1e29766b14c92b7dba820b53b47f4ebfc058c0e05" Feb 01 07:54:26 crc kubenswrapper[4650]: I0201 07:54:26.719561 4650 scope.go:117] "RemoveContainer" containerID="14dad6be3549ceffb86a1653b9ed525a1992ac37c1e02f90a17d34bf5db4a5ff" Feb 01 07:54:26 crc kubenswrapper[4650]: I0201 07:54:26.789646 4650 scope.go:117] "RemoveContainer" containerID="13d013ec21c57ca9189304a2c395d16127f381d035294f356846444028efff15" Feb 01 07:54:26 crc kubenswrapper[4650]: I0201 07:54:26.821083 4650 scope.go:117] "RemoveContainer" containerID="a37ed01bc81d8b11b267f4bc117b887d794544f411aee0319ba0e54edf32931f" Feb 01 07:54:26 crc kubenswrapper[4650]: I0201 07:54:26.876516 4650 scope.go:117] "RemoveContainer" containerID="5eb41c68237ec3cc6da92c18ba0065e2df3459fcd12210f2fea28d0e6566d418" Feb 01 07:54:26 crc kubenswrapper[4650]: I0201 07:54:26.911416 4650 scope.go:117] "RemoveContainer" containerID="875bb1d6c20e03a90968857d8f702027a85f51f6510abbd1505074ba3f2d143a" Feb 01 07:54:26 crc kubenswrapper[4650]: I0201 07:54:26.940563 4650 scope.go:117] "RemoveContainer" containerID="556271492f85f4aa5fdb1000f88331bfece93f7f4407518eda99552ba9aee135" Feb 01 07:54:26 crc kubenswrapper[4650]: I0201 07:54:26.967566 4650 scope.go:117] "RemoveContainer" containerID="7d4409e31e756b8c82c24433231b02179bb889b7eac8403081f27f4d1c6b222c" Feb 01 07:54:27 crc kubenswrapper[4650]: I0201 07:54:27.004969 4650 scope.go:117] "RemoveContainer" containerID="91004af82472041ae0ff9ed06b49bb5ea68b64403a72337f6408e7f5f8701466" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.419930 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9hn8j"] Feb 01 07:54:28 crc kubenswrapper[4650]: E0201 07:54:28.420740 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42d53310-48e3-4c5b-846c-b5f2cf1f1877" containerName="extract-content" Feb 01 
07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.420755 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="42d53310-48e3-4c5b-846c-b5f2cf1f1877" containerName="extract-content" Feb 01 07:54:28 crc kubenswrapper[4650]: E0201 07:54:28.420776 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42d53310-48e3-4c5b-846c-b5f2cf1f1877" containerName="extract-utilities" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.420785 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="42d53310-48e3-4c5b-846c-b5f2cf1f1877" containerName="extract-utilities" Feb 01 07:54:28 crc kubenswrapper[4650]: E0201 07:54:28.420804 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42d53310-48e3-4c5b-846c-b5f2cf1f1877" containerName="registry-server" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.420811 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="42d53310-48e3-4c5b-846c-b5f2cf1f1877" containerName="registry-server" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.421065 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="42d53310-48e3-4c5b-846c-b5f2cf1f1877" containerName="registry-server" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.422686 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.431172 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9hn8j"] Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.563361 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a784dfb4-637a-4eeb-8db2-850bc25a72db-utilities\") pod \"community-operators-9hn8j\" (UID: \"a784dfb4-637a-4eeb-8db2-850bc25a72db\") " pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.563501 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrtts\" (UniqueName: \"kubernetes.io/projected/a784dfb4-637a-4eeb-8db2-850bc25a72db-kube-api-access-xrtts\") pod \"community-operators-9hn8j\" (UID: \"a784dfb4-637a-4eeb-8db2-850bc25a72db\") " pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.563530 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a784dfb4-637a-4eeb-8db2-850bc25a72db-catalog-content\") pod \"community-operators-9hn8j\" (UID: \"a784dfb4-637a-4eeb-8db2-850bc25a72db\") " pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.590800 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b84cq"] Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.595431 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.601136 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b84cq"] Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.665246 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a784dfb4-637a-4eeb-8db2-850bc25a72db-utilities\") pod \"community-operators-9hn8j\" (UID: \"a784dfb4-637a-4eeb-8db2-850bc25a72db\") " pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.665367 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrtts\" (UniqueName: \"kubernetes.io/projected/a784dfb4-637a-4eeb-8db2-850bc25a72db-kube-api-access-xrtts\") pod \"community-operators-9hn8j\" (UID: \"a784dfb4-637a-4eeb-8db2-850bc25a72db\") " pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.665412 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a784dfb4-637a-4eeb-8db2-850bc25a72db-catalog-content\") pod \"community-operators-9hn8j\" (UID: \"a784dfb4-637a-4eeb-8db2-850bc25a72db\") " pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.665867 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a784dfb4-637a-4eeb-8db2-850bc25a72db-catalog-content\") pod \"community-operators-9hn8j\" (UID: \"a784dfb4-637a-4eeb-8db2-850bc25a72db\") " pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.666221 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a784dfb4-637a-4eeb-8db2-850bc25a72db-utilities\") pod \"community-operators-9hn8j\" (UID: \"a784dfb4-637a-4eeb-8db2-850bc25a72db\") " pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.696166 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrtts\" (UniqueName: \"kubernetes.io/projected/a784dfb4-637a-4eeb-8db2-850bc25a72db-kube-api-access-xrtts\") pod \"community-operators-9hn8j\" (UID: \"a784dfb4-637a-4eeb-8db2-850bc25a72db\") " pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.754591 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.767089 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5538e2bb-a1bb-483c-a941-8e0b7074033e-utilities\") pod \"redhat-marketplace-b84cq\" (UID: \"5538e2bb-a1bb-483c-a941-8e0b7074033e\") " pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.767141 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79xxp\" (UniqueName: \"kubernetes.io/projected/5538e2bb-a1bb-483c-a941-8e0b7074033e-kube-api-access-79xxp\") pod \"redhat-marketplace-b84cq\" (UID: \"5538e2bb-a1bb-483c-a941-8e0b7074033e\") " pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.767218 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5538e2bb-a1bb-483c-a941-8e0b7074033e-catalog-content\") pod \"redhat-marketplace-b84cq\" (UID: \"5538e2bb-a1bb-483c-a941-8e0b7074033e\") " pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.869913 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5538e2bb-a1bb-483c-a941-8e0b7074033e-utilities\") pod \"redhat-marketplace-b84cq\" (UID: \"5538e2bb-a1bb-483c-a941-8e0b7074033e\") " pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.870654 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79xxp\" (UniqueName: \"kubernetes.io/projected/5538e2bb-a1bb-483c-a941-8e0b7074033e-kube-api-access-79xxp\") pod \"redhat-marketplace-b84cq\" (UID: \"5538e2bb-a1bb-483c-a941-8e0b7074033e\") " pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.870905 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5538e2bb-a1bb-483c-a941-8e0b7074033e-catalog-content\") pod \"redhat-marketplace-b84cq\" (UID: \"5538e2bb-a1bb-483c-a941-8e0b7074033e\") " pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.870705 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5538e2bb-a1bb-483c-a941-8e0b7074033e-utilities\") pod \"redhat-marketplace-b84cq\" (UID: \"5538e2bb-a1bb-483c-a941-8e0b7074033e\") " pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.872440 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5538e2bb-a1bb-483c-a941-8e0b7074033e-catalog-content\") pod \"redhat-marketplace-b84cq\" (UID: \"5538e2bb-a1bb-483c-a941-8e0b7074033e\") " pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.897816 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79xxp\" (UniqueName: \"kubernetes.io/projected/5538e2bb-a1bb-483c-a941-8e0b7074033e-kube-api-access-79xxp\") pod 
\"redhat-marketplace-b84cq\" (UID: \"5538e2bb-a1bb-483c-a941-8e0b7074033e\") " pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.942987 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.966220 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.966282 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.966323 4650 scope.go:117] "RemoveContainer" containerID="8898426fa76236c2e570630635ac922875a39a37f68525ffa67c42004070d22e" Feb 01 07:54:28 crc kubenswrapper[4650]: I0201 07:54:28.966379 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:54:28 crc kubenswrapper[4650]: E0201 07:54:28.967073 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:54:29 crc kubenswrapper[4650]: I0201 07:54:29.181424 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9hn8j"] Feb 01 07:54:29 crc kubenswrapper[4650]: W0201 07:54:29.185586 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda784dfb4_637a_4eeb_8db2_850bc25a72db.slice/crio-ec4afa07b6c1fb0076035603ff9d2102064426e40b7fdf9ea17116e167efaa34 WatchSource:0}: Error finding container ec4afa07b6c1fb0076035603ff9d2102064426e40b7fdf9ea17116e167efaa34: Status 404 returned error can't find the container with id ec4afa07b6c1fb0076035603ff9d2102064426e40b7fdf9ea17116e167efaa34 Feb 01 07:54:29 crc kubenswrapper[4650]: I0201 07:54:29.379573 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b84cq"] Feb 01 07:54:29 crc kubenswrapper[4650]: W0201 07:54:29.392108 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5538e2bb_a1bb_483c_a941_8e0b7074033e.slice/crio-99201480c8a76c804e8cbfa5108203c16d2ef5a8779ceacce3dfc36d8478ecf9 WatchSource:0}: Error finding container 99201480c8a76c804e8cbfa5108203c16d2ef5a8779ceacce3dfc36d8478ecf9: Status 404 returned error can't find the container with id 99201480c8a76c804e8cbfa5108203c16d2ef5a8779ceacce3dfc36d8478ecf9 Feb 01 07:54:29 crc kubenswrapper[4650]: 
I0201 07:54:29.433676 4650 generic.go:334] "Generic (PLEG): container finished" podID="a784dfb4-637a-4eeb-8db2-850bc25a72db" containerID="95499700d67574d71fe310d530085932d8cf6176bc7aafc7b555e5fbf91d5e94" exitCode=0 Feb 01 07:54:29 crc kubenswrapper[4650]: I0201 07:54:29.433738 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hn8j" event={"ID":"a784dfb4-637a-4eeb-8db2-850bc25a72db","Type":"ContainerDied","Data":"95499700d67574d71fe310d530085932d8cf6176bc7aafc7b555e5fbf91d5e94"} Feb 01 07:54:29 crc kubenswrapper[4650]: I0201 07:54:29.434034 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hn8j" event={"ID":"a784dfb4-637a-4eeb-8db2-850bc25a72db","Type":"ContainerStarted","Data":"ec4afa07b6c1fb0076035603ff9d2102064426e40b7fdf9ea17116e167efaa34"} Feb 01 07:54:29 crc kubenswrapper[4650]: I0201 07:54:29.441463 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b84cq" event={"ID":"5538e2bb-a1bb-483c-a941-8e0b7074033e","Type":"ContainerStarted","Data":"99201480c8a76c804e8cbfa5108203c16d2ef5a8779ceacce3dfc36d8478ecf9"} Feb 01 07:54:30 crc kubenswrapper[4650]: I0201 07:54:30.453414 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hn8j" event={"ID":"a784dfb4-637a-4eeb-8db2-850bc25a72db","Type":"ContainerStarted","Data":"3c66ee01e75fab0ab8a6c156ae115440df1d1dfece304b8e0e8831c9a333fa54"} Feb 01 07:54:30 crc kubenswrapper[4650]: I0201 07:54:30.456625 4650 generic.go:334] "Generic (PLEG): container finished" podID="5538e2bb-a1bb-483c-a941-8e0b7074033e" containerID="8e4ec591bc3bc531fe244f2248b8ae57185effb06c0aeaf487c72091b9fa37e7" exitCode=0 Feb 01 07:54:30 crc kubenswrapper[4650]: I0201 07:54:30.456659 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b84cq" event={"ID":"5538e2bb-a1bb-483c-a941-8e0b7074033e","Type":"ContainerDied","Data":"8e4ec591bc3bc531fe244f2248b8ae57185effb06c0aeaf487c72091b9fa37e7"} Feb 01 07:54:31 crc kubenswrapper[4650]: I0201 07:54:31.474633 4650 generic.go:334] "Generic (PLEG): container finished" podID="a784dfb4-637a-4eeb-8db2-850bc25a72db" containerID="3c66ee01e75fab0ab8a6c156ae115440df1d1dfece304b8e0e8831c9a333fa54" exitCode=0 Feb 01 07:54:31 crc kubenswrapper[4650]: I0201 07:54:31.474744 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hn8j" event={"ID":"a784dfb4-637a-4eeb-8db2-850bc25a72db","Type":"ContainerDied","Data":"3c66ee01e75fab0ab8a6c156ae115440df1d1dfece304b8e0e8831c9a333fa54"} Feb 01 07:54:31 crc kubenswrapper[4650]: I0201 07:54:31.478774 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b84cq" event={"ID":"5538e2bb-a1bb-483c-a941-8e0b7074033e","Type":"ContainerStarted","Data":"2896e3364d788d4edef4aa45fb0c80e14c3b1da525c33c8c52edd819f45ff5eb"} Feb 01 07:54:32 crc kubenswrapper[4650]: I0201 07:54:32.491963 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hn8j" event={"ID":"a784dfb4-637a-4eeb-8db2-850bc25a72db","Type":"ContainerStarted","Data":"a93a06e0d538710204562d457dddd0b775d3d8adcb2fc4c54e7d22506bcbf45f"} Feb 01 07:54:32 crc kubenswrapper[4650]: I0201 07:54:32.518658 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9hn8j" podStartSLOduration=2.053479893 
podStartE2EDuration="4.518641825s" podCreationTimestamp="2026-02-01 07:54:28 +0000 UTC" firstStartedPulling="2026-02-01 07:54:29.436975057 +0000 UTC m=+1868.160073302" lastFinishedPulling="2026-02-01 07:54:31.902136959 +0000 UTC m=+1870.625235234" observedRunningTime="2026-02-01 07:54:32.5108125 +0000 UTC m=+1871.233910755" watchObservedRunningTime="2026-02-01 07:54:32.518641825 +0000 UTC m=+1871.241740070" Feb 01 07:54:32 crc kubenswrapper[4650]: I0201 07:54:32.965120 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:54:32 crc kubenswrapper[4650]: I0201 07:54:32.965188 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:54:32 crc kubenswrapper[4650]: E0201 07:54:32.965644 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:54:33 crc kubenswrapper[4650]: I0201 07:54:33.500363 4650 generic.go:334] "Generic (PLEG): container finished" podID="5538e2bb-a1bb-483c-a941-8e0b7074033e" containerID="2896e3364d788d4edef4aa45fb0c80e14c3b1da525c33c8c52edd819f45ff5eb" exitCode=0 Feb 01 07:54:33 crc kubenswrapper[4650]: I0201 07:54:33.500435 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b84cq" event={"ID":"5538e2bb-a1bb-483c-a941-8e0b7074033e","Type":"ContainerDied","Data":"2896e3364d788d4edef4aa45fb0c80e14c3b1da525c33c8c52edd819f45ff5eb"} Feb 01 07:54:34 crc kubenswrapper[4650]: I0201 07:54:34.511643 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b84cq" event={"ID":"5538e2bb-a1bb-483c-a941-8e0b7074033e","Type":"ContainerStarted","Data":"e4eab0f865143545d8fc8c727dd321f241d6c4729a48a4b8f163226005bce7fc"} Feb 01 07:54:34 crc kubenswrapper[4650]: I0201 07:54:34.536391 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b84cq" podStartSLOduration=3.08026412 podStartE2EDuration="6.536373551s" podCreationTimestamp="2026-02-01 07:54:28 +0000 UTC" firstStartedPulling="2026-02-01 07:54:30.458974259 +0000 UTC m=+1869.182072504" lastFinishedPulling="2026-02-01 07:54:33.91508368 +0000 UTC m=+1872.638181935" observedRunningTime="2026-02-01 07:54:34.528516335 +0000 UTC m=+1873.251614570" watchObservedRunningTime="2026-02-01 07:54:34.536373551 +0000 UTC m=+1873.259471806" Feb 01 07:54:38 crc kubenswrapper[4650]: I0201 07:54:38.755331 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:38 crc kubenswrapper[4650]: I0201 07:54:38.755988 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:38 crc kubenswrapper[4650]: I0201 07:54:38.943490 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:38 crc kubenswrapper[4650]: 
I0201 07:54:38.943541 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:39 crc kubenswrapper[4650]: I0201 07:54:39.815222 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-9hn8j" podUID="a784dfb4-637a-4eeb-8db2-850bc25a72db" containerName="registry-server" probeResult="failure" output=< Feb 01 07:54:39 crc kubenswrapper[4650]: timeout: failed to connect service ":50051" within 1s Feb 01 07:54:39 crc kubenswrapper[4650]: > Feb 01 07:54:39 crc kubenswrapper[4650]: I0201 07:54:39.991881 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-b84cq" podUID="5538e2bb-a1bb-483c-a941-8e0b7074033e" containerName="registry-server" probeResult="failure" output=< Feb 01 07:54:39 crc kubenswrapper[4650]: timeout: failed to connect service ":50051" within 1s Feb 01 07:54:39 crc kubenswrapper[4650]: > Feb 01 07:54:41 crc kubenswrapper[4650]: I0201 07:54:41.972111 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:54:41 crc kubenswrapper[4650]: I0201 07:54:41.972536 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:54:41 crc kubenswrapper[4650]: I0201 07:54:41.972568 4650 scope.go:117] "RemoveContainer" containerID="8898426fa76236c2e570630635ac922875a39a37f68525ffa67c42004070d22e" Feb 01 07:54:41 crc kubenswrapper[4650]: I0201 07:54:41.972645 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:54:41 crc kubenswrapper[4650]: E0201 07:54:41.973058 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:54:44 crc kubenswrapper[4650]: I0201 07:54:44.924680 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:54:44 crc kubenswrapper[4650]: E0201 07:54:44.925140 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:54:44 crc kubenswrapper[4650]: E0201 07:54:44.925195 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices 
podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:56:46.925177958 +0000 UTC m=+2005.648276193 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:54:45 crc kubenswrapper[4650]: I0201 07:54:45.966440 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:54:45 crc kubenswrapper[4650]: I0201 07:54:45.966788 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:54:45 crc kubenswrapper[4650]: E0201 07:54:45.967267 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:54:48 crc kubenswrapper[4650]: I0201 07:54:48.851441 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:49 crc kubenswrapper[4650]: I0201 07:54:49.048611 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:49 crc kubenswrapper[4650]: I0201 07:54:49.067676 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:49 crc kubenswrapper[4650]: I0201 07:54:49.111229 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:49 crc kubenswrapper[4650]: E0201 07:54:49.321983 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 07:54:49 crc kubenswrapper[4650]: I0201 07:54:49.642366 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:54:50 crc kubenswrapper[4650]: I0201 07:54:50.916078 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9hn8j"] Feb 01 07:54:50 crc kubenswrapper[4650]: I0201 07:54:50.916698 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9hn8j" podUID="a784dfb4-637a-4eeb-8db2-850bc25a72db" containerName="registry-server" containerID="cri-o://a93a06e0d538710204562d457dddd0b775d3d8adcb2fc4c54e7d22506bcbf45f" gracePeriod=2 Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.510727 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.512768 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b84cq"] Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.513103 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-b84cq" podUID="5538e2bb-a1bb-483c-a941-8e0b7074033e" containerName="registry-server" containerID="cri-o://e4eab0f865143545d8fc8c727dd321f241d6c4729a48a4b8f163226005bce7fc" gracePeriod=2 Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.581292 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a784dfb4-637a-4eeb-8db2-850bc25a72db-catalog-content\") pod \"a784dfb4-637a-4eeb-8db2-850bc25a72db\" (UID: \"a784dfb4-637a-4eeb-8db2-850bc25a72db\") " Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.581413 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a784dfb4-637a-4eeb-8db2-850bc25a72db-utilities\") pod \"a784dfb4-637a-4eeb-8db2-850bc25a72db\" (UID: \"a784dfb4-637a-4eeb-8db2-850bc25a72db\") " Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.581471 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrtts\" (UniqueName: \"kubernetes.io/projected/a784dfb4-637a-4eeb-8db2-850bc25a72db-kube-api-access-xrtts\") pod \"a784dfb4-637a-4eeb-8db2-850bc25a72db\" (UID: \"a784dfb4-637a-4eeb-8db2-850bc25a72db\") " Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.594560 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a784dfb4-637a-4eeb-8db2-850bc25a72db-kube-api-access-xrtts" (OuterVolumeSpecName: "kube-api-access-xrtts") pod "a784dfb4-637a-4eeb-8db2-850bc25a72db" (UID: "a784dfb4-637a-4eeb-8db2-850bc25a72db"). InnerVolumeSpecName "kube-api-access-xrtts". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.594779 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a784dfb4-637a-4eeb-8db2-850bc25a72db-utilities" (OuterVolumeSpecName: "utilities") pod "a784dfb4-637a-4eeb-8db2-850bc25a72db" (UID: "a784dfb4-637a-4eeb-8db2-850bc25a72db"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.659461 4650 generic.go:334] "Generic (PLEG): container finished" podID="a784dfb4-637a-4eeb-8db2-850bc25a72db" containerID="a93a06e0d538710204562d457dddd0b775d3d8adcb2fc4c54e7d22506bcbf45f" exitCode=0 Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.659529 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9hn8j" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.659514 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hn8j" event={"ID":"a784dfb4-637a-4eeb-8db2-850bc25a72db","Type":"ContainerDied","Data":"a93a06e0d538710204562d457dddd0b775d3d8adcb2fc4c54e7d22506bcbf45f"} Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.659695 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hn8j" event={"ID":"a784dfb4-637a-4eeb-8db2-850bc25a72db","Type":"ContainerDied","Data":"ec4afa07b6c1fb0076035603ff9d2102064426e40b7fdf9ea17116e167efaa34"} Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.659738 4650 scope.go:117] "RemoveContainer" containerID="a93a06e0d538710204562d457dddd0b775d3d8adcb2fc4c54e7d22506bcbf45f" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.659900 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a784dfb4-637a-4eeb-8db2-850bc25a72db-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a784dfb4-637a-4eeb-8db2-850bc25a72db" (UID: "a784dfb4-637a-4eeb-8db2-850bc25a72db"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.663090 4650 generic.go:334] "Generic (PLEG): container finished" podID="5538e2bb-a1bb-483c-a941-8e0b7074033e" containerID="e4eab0f865143545d8fc8c727dd321f241d6c4729a48a4b8f163226005bce7fc" exitCode=0 Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.663117 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b84cq" event={"ID":"5538e2bb-a1bb-483c-a941-8e0b7074033e","Type":"ContainerDied","Data":"e4eab0f865143545d8fc8c727dd321f241d6c4729a48a4b8f163226005bce7fc"} Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.683508 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a784dfb4-637a-4eeb-8db2-850bc25a72db-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.683539 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a784dfb4-637a-4eeb-8db2-850bc25a72db-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.683549 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrtts\" (UniqueName: \"kubernetes.io/projected/a784dfb4-637a-4eeb-8db2-850bc25a72db-kube-api-access-xrtts\") on node \"crc\" DevicePath \"\"" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.696713 4650 scope.go:117] "RemoveContainer" containerID="3c66ee01e75fab0ab8a6c156ae115440df1d1dfece304b8e0e8831c9a333fa54" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.736366 4650 scope.go:117] "RemoveContainer" containerID="95499700d67574d71fe310d530085932d8cf6176bc7aafc7b555e5fbf91d5e94" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.764437 4650 scope.go:117] "RemoveContainer" containerID="a93a06e0d538710204562d457dddd0b775d3d8adcb2fc4c54e7d22506bcbf45f" Feb 01 07:54:51 crc kubenswrapper[4650]: E0201 07:54:51.781857 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a93a06e0d538710204562d457dddd0b775d3d8adcb2fc4c54e7d22506bcbf45f\": container with ID starting 
with a93a06e0d538710204562d457dddd0b775d3d8adcb2fc4c54e7d22506bcbf45f not found: ID does not exist" containerID="a93a06e0d538710204562d457dddd0b775d3d8adcb2fc4c54e7d22506bcbf45f" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.781896 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a93a06e0d538710204562d457dddd0b775d3d8adcb2fc4c54e7d22506bcbf45f"} err="failed to get container status \"a93a06e0d538710204562d457dddd0b775d3d8adcb2fc4c54e7d22506bcbf45f\": rpc error: code = NotFound desc = could not find container \"a93a06e0d538710204562d457dddd0b775d3d8adcb2fc4c54e7d22506bcbf45f\": container with ID starting with a93a06e0d538710204562d457dddd0b775d3d8adcb2fc4c54e7d22506bcbf45f not found: ID does not exist" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.781920 4650 scope.go:117] "RemoveContainer" containerID="3c66ee01e75fab0ab8a6c156ae115440df1d1dfece304b8e0e8831c9a333fa54" Feb 01 07:54:51 crc kubenswrapper[4650]: E0201 07:54:51.782258 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c66ee01e75fab0ab8a6c156ae115440df1d1dfece304b8e0e8831c9a333fa54\": container with ID starting with 3c66ee01e75fab0ab8a6c156ae115440df1d1dfece304b8e0e8831c9a333fa54 not found: ID does not exist" containerID="3c66ee01e75fab0ab8a6c156ae115440df1d1dfece304b8e0e8831c9a333fa54" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.782296 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c66ee01e75fab0ab8a6c156ae115440df1d1dfece304b8e0e8831c9a333fa54"} err="failed to get container status \"3c66ee01e75fab0ab8a6c156ae115440df1d1dfece304b8e0e8831c9a333fa54\": rpc error: code = NotFound desc = could not find container \"3c66ee01e75fab0ab8a6c156ae115440df1d1dfece304b8e0e8831c9a333fa54\": container with ID starting with 3c66ee01e75fab0ab8a6c156ae115440df1d1dfece304b8e0e8831c9a333fa54 not found: ID does not exist" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.782322 4650 scope.go:117] "RemoveContainer" containerID="95499700d67574d71fe310d530085932d8cf6176bc7aafc7b555e5fbf91d5e94" Feb 01 07:54:51 crc kubenswrapper[4650]: E0201 07:54:51.782660 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95499700d67574d71fe310d530085932d8cf6176bc7aafc7b555e5fbf91d5e94\": container with ID starting with 95499700d67574d71fe310d530085932d8cf6176bc7aafc7b555e5fbf91d5e94 not found: ID does not exist" containerID="95499700d67574d71fe310d530085932d8cf6176bc7aafc7b555e5fbf91d5e94" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.782683 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95499700d67574d71fe310d530085932d8cf6176bc7aafc7b555e5fbf91d5e94"} err="failed to get container status \"95499700d67574d71fe310d530085932d8cf6176bc7aafc7b555e5fbf91d5e94\": rpc error: code = NotFound desc = could not find container \"95499700d67574d71fe310d530085932d8cf6176bc7aafc7b555e5fbf91d5e94\": container with ID starting with 95499700d67574d71fe310d530085932d8cf6176bc7aafc7b555e5fbf91d5e94 not found: ID does not exist" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.850927 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.885848 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5538e2bb-a1bb-483c-a941-8e0b7074033e-utilities\") pod \"5538e2bb-a1bb-483c-a941-8e0b7074033e\" (UID: \"5538e2bb-a1bb-483c-a941-8e0b7074033e\") " Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.886376 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5538e2bb-a1bb-483c-a941-8e0b7074033e-utilities" (OuterVolumeSpecName: "utilities") pod "5538e2bb-a1bb-483c-a941-8e0b7074033e" (UID: "5538e2bb-a1bb-483c-a941-8e0b7074033e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.886426 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5538e2bb-a1bb-483c-a941-8e0b7074033e-catalog-content\") pod \"5538e2bb-a1bb-483c-a941-8e0b7074033e\" (UID: \"5538e2bb-a1bb-483c-a941-8e0b7074033e\") " Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.886526 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79xxp\" (UniqueName: \"kubernetes.io/projected/5538e2bb-a1bb-483c-a941-8e0b7074033e-kube-api-access-79xxp\") pod \"5538e2bb-a1bb-483c-a941-8e0b7074033e\" (UID: \"5538e2bb-a1bb-483c-a941-8e0b7074033e\") " Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.887772 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5538e2bb-a1bb-483c-a941-8e0b7074033e-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.889925 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5538e2bb-a1bb-483c-a941-8e0b7074033e-kube-api-access-79xxp" (OuterVolumeSpecName: "kube-api-access-79xxp") pod "5538e2bb-a1bb-483c-a941-8e0b7074033e" (UID: "5538e2bb-a1bb-483c-a941-8e0b7074033e"). InnerVolumeSpecName "kube-api-access-79xxp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.903925 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5538e2bb-a1bb-483c-a941-8e0b7074033e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5538e2bb-a1bb-483c-a941-8e0b7074033e" (UID: "5538e2bb-a1bb-483c-a941-8e0b7074033e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.990718 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5538e2bb-a1bb-483c-a941-8e0b7074033e-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 07:54:51 crc kubenswrapper[4650]: I0201 07:54:51.999397 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79xxp\" (UniqueName: \"kubernetes.io/projected/5538e2bb-a1bb-483c-a941-8e0b7074033e-kube-api-access-79xxp\") on node \"crc\" DevicePath \"\"" Feb 01 07:54:52 crc kubenswrapper[4650]: I0201 07:54:52.005344 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9hn8j"] Feb 01 07:54:52 crc kubenswrapper[4650]: I0201 07:54:52.012454 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9hn8j"] Feb 01 07:54:52 crc kubenswrapper[4650]: I0201 07:54:52.675068 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b84cq" Feb 01 07:54:52 crc kubenswrapper[4650]: I0201 07:54:52.675103 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b84cq" event={"ID":"5538e2bb-a1bb-483c-a941-8e0b7074033e","Type":"ContainerDied","Data":"99201480c8a76c804e8cbfa5108203c16d2ef5a8779ceacce3dfc36d8478ecf9"} Feb 01 07:54:52 crc kubenswrapper[4650]: I0201 07:54:52.675206 4650 scope.go:117] "RemoveContainer" containerID="e4eab0f865143545d8fc8c727dd321f241d6c4729a48a4b8f163226005bce7fc" Feb 01 07:54:52 crc kubenswrapper[4650]: I0201 07:54:52.704771 4650 scope.go:117] "RemoveContainer" containerID="2896e3364d788d4edef4aa45fb0c80e14c3b1da525c33c8c52edd819f45ff5eb" Feb 01 07:54:52 crc kubenswrapper[4650]: I0201 07:54:52.710510 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b84cq"] Feb 01 07:54:52 crc kubenswrapper[4650]: I0201 07:54:52.722449 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-b84cq"] Feb 01 07:54:52 crc kubenswrapper[4650]: I0201 07:54:52.739359 4650 scope.go:117] "RemoveContainer" containerID="8e4ec591bc3bc531fe244f2248b8ae57185effb06c0aeaf487c72091b9fa37e7" Feb 01 07:54:53 crc kubenswrapper[4650]: I0201 07:54:53.980957 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5538e2bb-a1bb-483c-a941-8e0b7074033e" path="/var/lib/kubelet/pods/5538e2bb-a1bb-483c-a941-8e0b7074033e/volumes" Feb 01 07:54:53 crc kubenswrapper[4650]: I0201 07:54:53.983248 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a784dfb4-637a-4eeb-8db2-850bc25a72db" path="/var/lib/kubelet/pods/a784dfb4-637a-4eeb-8db2-850bc25a72db/volumes" Feb 01 07:54:56 crc kubenswrapper[4650]: I0201 07:54:56.966407 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:54:56 crc kubenswrapper[4650]: I0201 07:54:56.966918 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:54:56 crc kubenswrapper[4650]: I0201 07:54:56.966966 4650 scope.go:117] "RemoveContainer" containerID="8898426fa76236c2e570630635ac922875a39a37f68525ffa67c42004070d22e" Feb 01 07:54:56 crc kubenswrapper[4650]: I0201 07:54:56.967113 4650 scope.go:117] "RemoveContainer" 
containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:54:57 crc kubenswrapper[4650]: E0201 07:54:57.234631 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:54:57 crc kubenswrapper[4650]: I0201 07:54:57.728005 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd"} Feb 01 07:54:57 crc kubenswrapper[4650]: I0201 07:54:57.728517 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:54:57 crc kubenswrapper[4650]: I0201 07:54:57.728578 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:54:57 crc kubenswrapper[4650]: I0201 07:54:57.728665 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:54:57 crc kubenswrapper[4650]: E0201 07:54:57.728914 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:54:58 crc kubenswrapper[4650]: I0201 07:54:58.967331 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:54:58 crc kubenswrapper[4650]: I0201 07:54:58.967890 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:54:58 crc kubenswrapper[4650]: E0201 07:54:58.969614 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" 
podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:55:06 crc kubenswrapper[4650]: I0201 07:55:06.045175 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-gts9x"] Feb 01 07:55:06 crc kubenswrapper[4650]: I0201 07:55:06.052264 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-gts9x"] Feb 01 07:55:07 crc kubenswrapper[4650]: I0201 07:55:07.984794 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9feff00-76d8-4b55-a86c-5b5aabd5e7a6" path="/var/lib/kubelet/pods/c9feff00-76d8-4b55-a86c-5b5aabd5e7a6/volumes" Feb 01 07:55:11 crc kubenswrapper[4650]: I0201 07:55:11.973242 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:55:11 crc kubenswrapper[4650]: I0201 07:55:11.973693 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:55:12 crc kubenswrapper[4650]: E0201 07:55:12.183373 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:55:12 crc kubenswrapper[4650]: I0201 07:55:12.877799 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"3e8e9abcf03a13becab41449d9d3b061481a9e65fb41866ea32827e9ea671047"} Feb 01 07:55:12 crc kubenswrapper[4650]: I0201 07:55:12.878177 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:55:12 crc kubenswrapper[4650]: I0201 07:55:12.878775 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:55:12 crc kubenswrapper[4650]: E0201 07:55:12.879326 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:55:12 crc kubenswrapper[4650]: I0201 07:55:12.966667 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:55:12 crc kubenswrapper[4650]: I0201 07:55:12.966798 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:55:12 crc kubenswrapper[4650]: I0201 07:55:12.966996 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:55:12 crc kubenswrapper[4650]: E0201 07:55:12.967598 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:55:13 crc kubenswrapper[4650]: I0201 07:55:13.891325 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:55:13 crc kubenswrapper[4650]: E0201 07:55:13.891958 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:55:18 crc kubenswrapper[4650]: I0201 07:55:18.834895 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:55:19 crc kubenswrapper[4650]: I0201 07:55:19.821994 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:55:21 crc kubenswrapper[4650]: I0201 07:55:21.804956 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:55:24 crc kubenswrapper[4650]: I0201 07:55:24.805251 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:55:24 crc kubenswrapper[4650]: I0201 07:55:24.810264 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:55:24 crc kubenswrapper[4650]: I0201 07:55:24.810357 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:55:24 crc kubenswrapper[4650]: I0201 07:55:24.811316 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"3e8e9abcf03a13becab41449d9d3b061481a9e65fb41866ea32827e9ea671047"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 07:55:24 crc kubenswrapper[4650]: I0201 07:55:24.811358 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:55:24 crc kubenswrapper[4650]: I0201 07:55:24.811392 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" 
containerID="cri-o://3e8e9abcf03a13becab41449d9d3b061481a9e65fb41866ea32827e9ea671047" gracePeriod=30 Feb 01 07:55:24 crc kubenswrapper[4650]: I0201 07:55:24.817481 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:55:24 crc kubenswrapper[4650]: I0201 07:55:24.966366 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:55:24 crc kubenswrapper[4650]: I0201 07:55:24.966475 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:55:24 crc kubenswrapper[4650]: I0201 07:55:24.966630 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:55:24 crc kubenswrapper[4650]: E0201 07:55:24.967488 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:55:25 crc kubenswrapper[4650]: I0201 07:55:25.005805 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="3e8e9abcf03a13becab41449d9d3b061481a9e65fb41866ea32827e9ea671047" exitCode=0 Feb 01 07:55:25 crc kubenswrapper[4650]: I0201 07:55:25.005871 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"3e8e9abcf03a13becab41449d9d3b061481a9e65fb41866ea32827e9ea671047"} Feb 01 07:55:25 crc kubenswrapper[4650]: I0201 07:55:25.005911 4650 scope.go:117] "RemoveContainer" containerID="37b1ffb5a4596751bcf47bb2491eeefd6fdb7e32b15681c33535f213885a362b" Feb 01 07:55:25 crc kubenswrapper[4650]: E0201 07:55:25.140939 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:55:26 crc kubenswrapper[4650]: I0201 07:55:26.013741 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0"} Feb 01 07:55:26 crc kubenswrapper[4650]: I0201 07:55:26.014059 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:55:26 crc kubenswrapper[4650]: I0201 07:55:26.014442 4650 scope.go:117] "RemoveContainer" 
containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:55:26 crc kubenswrapper[4650]: E0201 07:55:26.014640 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:55:27 crc kubenswrapper[4650]: I0201 07:55:27.044294 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:55:27 crc kubenswrapper[4650]: E0201 07:55:27.044707 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:55:27 crc kubenswrapper[4650]: I0201 07:55:27.173703 4650 scope.go:117] "RemoveContainer" containerID="7ca780b1af15e2db895312a393c53206efc3db2dfc95bafaf856f097f3efbb93" Feb 01 07:55:30 crc kubenswrapper[4650]: I0201 07:55:30.807641 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:55:33 crc kubenswrapper[4650]: I0201 07:55:33.811763 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:55:34 crc kubenswrapper[4650]: I0201 07:55:34.810082 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:55:35 crc kubenswrapper[4650]: I0201 07:55:35.965895 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:55:35 crc kubenswrapper[4650]: I0201 07:55:35.965959 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:55:35 crc kubenswrapper[4650]: I0201 07:55:35.966056 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:55:35 crc kubenswrapper[4650]: E0201 07:55:35.966328 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" 
podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:55:36 crc kubenswrapper[4650]: I0201 07:55:36.806619 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:55:36 crc kubenswrapper[4650]: I0201 07:55:36.807116 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:55:36 crc kubenswrapper[4650]: I0201 07:55:36.808179 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 07:55:36 crc kubenswrapper[4650]: I0201 07:55:36.808220 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:55:36 crc kubenswrapper[4650]: I0201 07:55:36.808255 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" gracePeriod=30 Feb 01 07:55:36 crc kubenswrapper[4650]: I0201 07:55:36.812863 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 07:55:36 crc kubenswrapper[4650]: E0201 07:55:36.929835 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:55:37 crc kubenswrapper[4650]: I0201 07:55:37.380625 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" exitCode=0 Feb 01 07:55:37 crc kubenswrapper[4650]: I0201 07:55:37.380670 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0"} Feb 01 07:55:37 crc kubenswrapper[4650]: I0201 07:55:37.380770 4650 scope.go:117] "RemoveContainer" containerID="3e8e9abcf03a13becab41449d9d3b061481a9e65fb41866ea32827e9ea671047" Feb 01 07:55:37 crc kubenswrapper[4650]: I0201 07:55:37.381577 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:55:37 crc kubenswrapper[4650]: I0201 07:55:37.381625 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:55:37 crc 
kubenswrapper[4650]: E0201 07:55:37.382308 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:55:49 crc kubenswrapper[4650]: I0201 07:55:49.966385 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:55:49 crc kubenswrapper[4650]: I0201 07:55:49.967297 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:55:49 crc kubenswrapper[4650]: E0201 07:55:49.967840 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:55:49 crc kubenswrapper[4650]: I0201 07:55:49.969551 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:55:49 crc kubenswrapper[4650]: I0201 07:55:49.969709 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:55:49 crc kubenswrapper[4650]: I0201 07:55:49.969970 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:55:49 crc kubenswrapper[4650]: E0201 07:55:49.970707 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:56:02 crc kubenswrapper[4650]: I0201 07:56:02.966391 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:56:02 crc kubenswrapper[4650]: I0201 07:56:02.967270 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:56:02 crc kubenswrapper[4650]: I0201 07:56:02.967479 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:56:02 
crc kubenswrapper[4650]: E0201 07:56:02.968111 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:56:04 crc kubenswrapper[4650]: I0201 07:56:04.666521 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="4b33320c9f35a50c528cb00327a30a15dbb184f32e6aea5d10e70ce7875241d7" exitCode=1 Feb 01 07:56:04 crc kubenswrapper[4650]: I0201 07:56:04.666569 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"4b33320c9f35a50c528cb00327a30a15dbb184f32e6aea5d10e70ce7875241d7"} Feb 01 07:56:04 crc kubenswrapper[4650]: I0201 07:56:04.666609 4650 scope.go:117] "RemoveContainer" containerID="f3ddd55de1aeddf7a04dcf8fad21d6821622eb50041c19f6cc16fdd930faf590" Feb 01 07:56:04 crc kubenswrapper[4650]: I0201 07:56:04.667547 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:56:04 crc kubenswrapper[4650]: I0201 07:56:04.667681 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:56:04 crc kubenswrapper[4650]: I0201 07:56:04.667836 4650 scope.go:117] "RemoveContainer" containerID="4b33320c9f35a50c528cb00327a30a15dbb184f32e6aea5d10e70ce7875241d7" Feb 01 07:56:04 crc kubenswrapper[4650]: I0201 07:56:04.667871 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:56:04 crc kubenswrapper[4650]: E0201 07:56:04.668540 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:56:04 crc kubenswrapper[4650]: I0201 07:56:04.965734 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:56:04 crc kubenswrapper[4650]: I0201 
07:56:04.965796 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:56:04 crc kubenswrapper[4650]: E0201 07:56:04.966202 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:56:07 crc kubenswrapper[4650]: I0201 07:56:07.160983 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:56:07 crc kubenswrapper[4650]: I0201 07:56:07.162697 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:56:14 crc kubenswrapper[4650]: I0201 07:56:14.965827 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:56:14 crc kubenswrapper[4650]: I0201 07:56:14.967569 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:56:14 crc kubenswrapper[4650]: I0201 07:56:14.967800 4650 scope.go:117] "RemoveContainer" containerID="4b33320c9f35a50c528cb00327a30a15dbb184f32e6aea5d10e70ce7875241d7" Feb 01 07:56:14 crc kubenswrapper[4650]: I0201 07:56:14.967892 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:56:14 crc kubenswrapper[4650]: E0201 07:56:14.968441 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:56:19 crc kubenswrapper[4650]: I0201 07:56:19.965531 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:56:19 crc kubenswrapper[4650]: I0201 07:56:19.966357 4650 scope.go:117] 
"RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:56:19 crc kubenswrapper[4650]: E0201 07:56:19.966972 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:56:27 crc kubenswrapper[4650]: I0201 07:56:27.966771 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:56:27 crc kubenswrapper[4650]: I0201 07:56:27.968962 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:56:27 crc kubenswrapper[4650]: I0201 07:56:27.969296 4650 scope.go:117] "RemoveContainer" containerID="4b33320c9f35a50c528cb00327a30a15dbb184f32e6aea5d10e70ce7875241d7" Feb 01 07:56:27 crc kubenswrapper[4650]: I0201 07:56:27.969453 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:56:27 crc kubenswrapper[4650]: E0201 07:56:27.970483 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:56:30 crc kubenswrapper[4650]: I0201 07:56:30.965805 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:56:30 crc kubenswrapper[4650]: I0201 07:56:30.966057 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:56:30 crc kubenswrapper[4650]: E0201 07:56:30.966317 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:56:37 crc kubenswrapper[4650]: I0201 
07:56:37.161647 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:56:37 crc kubenswrapper[4650]: I0201 07:56:37.162256 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:56:41 crc kubenswrapper[4650]: I0201 07:56:41.993558 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:56:41 crc kubenswrapper[4650]: I0201 07:56:41.994648 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:56:41 crc kubenswrapper[4650]: I0201 07:56:41.994967 4650 scope.go:117] "RemoveContainer" containerID="4b33320c9f35a50c528cb00327a30a15dbb184f32e6aea5d10e70ce7875241d7" Feb 01 07:56:41 crc kubenswrapper[4650]: I0201 07:56:41.995006 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:56:41 crc kubenswrapper[4650]: E0201 07:56:41.996505 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:56:45 crc kubenswrapper[4650]: I0201 07:56:45.965768 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:56:45 crc kubenswrapper[4650]: I0201 07:56:45.966237 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:56:45 crc kubenswrapper[4650]: E0201 07:56:45.966527 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:56:46 crc kubenswrapper[4650]: I0201 07:56:46.949057 4650 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:56:46 crc kubenswrapper[4650]: E0201 07:56:46.949235 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:56:46 crc kubenswrapper[4650]: E0201 07:56:46.949315 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 07:58:48.949296629 +0000 UTC m=+2127.672394874 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:56:52 crc kubenswrapper[4650]: E0201 07:56:52.645218 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 07:56:53 crc kubenswrapper[4650]: I0201 07:56:53.159144 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:56:54 crc kubenswrapper[4650]: I0201 07:56:54.966380 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:56:54 crc kubenswrapper[4650]: I0201 07:56:54.966930 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:56:54 crc kubenswrapper[4650]: I0201 07:56:54.967167 4650 scope.go:117] "RemoveContainer" containerID="4b33320c9f35a50c528cb00327a30a15dbb184f32e6aea5d10e70ce7875241d7" Feb 01 07:56:54 crc kubenswrapper[4650]: I0201 07:56:54.967190 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:56:56 crc kubenswrapper[4650]: I0201 07:56:56.206020 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" exitCode=1 Feb 01 07:56:56 crc kubenswrapper[4650]: I0201 07:56:56.206751 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" exitCode=1 Feb 01 07:56:56 crc kubenswrapper[4650]: I0201 07:56:56.206055 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938"} Feb 01 07:56:56 crc kubenswrapper[4650]: I0201 07:56:56.206798 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"b97280e0ed4d76c45bcae2e71d0aaff3b46058139a4ac3291e91cfe7220e1000"} Feb 01 07:56:56 crc kubenswrapper[4650]: I0201 
07:56:56.206815 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d"} Feb 01 07:56:56 crc kubenswrapper[4650]: I0201 07:56:56.206838 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9"} Feb 01 07:56:56 crc kubenswrapper[4650]: I0201 07:56:56.206862 4650 scope.go:117] "RemoveContainer" containerID="1f27d7b1c6be915d030908f1ec8addd24fc3a36eeb9436a2105df6da0d1973f9" Feb 01 07:56:56 crc kubenswrapper[4650]: I0201 07:56:56.207904 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:56:56 crc kubenswrapper[4650]: I0201 07:56:56.208053 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:56:56 crc kubenswrapper[4650]: E0201 07:56:56.208710 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:56:56 crc kubenswrapper[4650]: I0201 07:56:56.290228 4650 scope.go:117] "RemoveContainer" containerID="4210c32ca15a0ecf782098089bf5631be131079ee5739f1a16af6c1932918754" Feb 01 07:56:57 crc kubenswrapper[4650]: I0201 07:56:57.226527 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" exitCode=1 Feb 01 07:56:57 crc kubenswrapper[4650]: I0201 07:56:57.226736 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938"} Feb 01 07:56:57 crc kubenswrapper[4650]: I0201 07:56:57.227170 4650 scope.go:117] "RemoveContainer" containerID="4dc4abf73635fe8e4fa7a56f1b5bc88594f27f3e8e388cdd75d10163a6a95950" Feb 01 07:56:57 crc kubenswrapper[4650]: I0201 07:56:57.227860 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:56:57 crc kubenswrapper[4650]: I0201 07:56:57.227916 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:56:57 crc kubenswrapper[4650]: I0201 07:56:57.227998 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 07:56:57 crc kubenswrapper[4650]: E0201 07:56:57.228277 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for 
\"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:57:00 crc kubenswrapper[4650]: I0201 07:57:00.965841 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:57:00 crc kubenswrapper[4650]: I0201 07:57:00.966473 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:57:00 crc kubenswrapper[4650]: E0201 07:57:00.966881 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:57:07 crc kubenswrapper[4650]: I0201 07:57:07.160956 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:57:07 crc kubenswrapper[4650]: I0201 07:57:07.161530 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:57:07 crc kubenswrapper[4650]: I0201 07:57:07.161580 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 07:57:07 crc kubenswrapper[4650]: I0201 07:57:07.162500 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a1f111be91cc1e9fcafc3706031d6b3031dfcc3555d9fa4700a7b38ad3a07c2d"} pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 01 07:57:07 crc kubenswrapper[4650]: I0201 07:57:07.162570 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" containerID="cri-o://a1f111be91cc1e9fcafc3706031d6b3031dfcc3555d9fa4700a7b38ad3a07c2d" gracePeriod=600 Feb 01 07:57:07 crc kubenswrapper[4650]: I0201 07:57:07.350768 4650 generic.go:334] "Generic (PLEG): container finished" podID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerID="a1f111be91cc1e9fcafc3706031d6b3031dfcc3555d9fa4700a7b38ad3a07c2d" exitCode=0 Feb 01 07:57:07 crc kubenswrapper[4650]: I0201 
07:57:07.350817 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerDied","Data":"a1f111be91cc1e9fcafc3706031d6b3031dfcc3555d9fa4700a7b38ad3a07c2d"} Feb 01 07:57:07 crc kubenswrapper[4650]: I0201 07:57:07.350868 4650 scope.go:117] "RemoveContainer" containerID="ca5abee74ef48661e0659b3aee35147d0e90c9a4a148b8a20dfa544e8b6987d4" Feb 01 07:57:08 crc kubenswrapper[4650]: I0201 07:57:08.364251 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f"} Feb 01 07:57:11 crc kubenswrapper[4650]: I0201 07:57:11.972834 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:57:11 crc kubenswrapper[4650]: I0201 07:57:11.973473 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:57:11 crc kubenswrapper[4650]: E0201 07:57:11.973860 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:57:12 crc kubenswrapper[4650]: I0201 07:57:12.966579 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:57:12 crc kubenswrapper[4650]: I0201 07:57:12.967072 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:57:12 crc kubenswrapper[4650]: I0201 07:57:12.967280 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 07:57:12 crc kubenswrapper[4650]: E0201 07:57:12.967779 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:57:22 crc kubenswrapper[4650]: I0201 07:57:22.966365 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:57:22 crc kubenswrapper[4650]: I0201 07:57:22.967172 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 
07:57:22 crc kubenswrapper[4650]: E0201 07:57:22.967692 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:57:27 crc kubenswrapper[4650]: I0201 07:57:27.966578 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:57:27 crc kubenswrapper[4650]: I0201 07:57:27.967671 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:57:27 crc kubenswrapper[4650]: I0201 07:57:27.967878 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 07:57:27 crc kubenswrapper[4650]: E0201 07:57:27.968635 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:57:36 crc kubenswrapper[4650]: I0201 07:57:36.966475 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:57:36 crc kubenswrapper[4650]: I0201 07:57:36.967368 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:57:36 crc kubenswrapper[4650]: E0201 07:57:36.967990 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:57:39 crc kubenswrapper[4650]: I0201 07:57:39.966067 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:57:39 crc kubenswrapper[4650]: I0201 07:57:39.966448 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:57:39 crc kubenswrapper[4650]: I0201 07:57:39.966569 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 
01 07:57:39 crc kubenswrapper[4650]: E0201 07:57:39.966916 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:57:51 crc kubenswrapper[4650]: I0201 07:57:51.975742 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:57:51 crc kubenswrapper[4650]: I0201 07:57:51.976403 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:57:51 crc kubenswrapper[4650]: E0201 07:57:51.976724 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:57:54 crc kubenswrapper[4650]: I0201 07:57:54.965723 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:57:54 crc kubenswrapper[4650]: I0201 07:57:54.966254 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:57:54 crc kubenswrapper[4650]: I0201 07:57:54.966340 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 07:57:54 crc kubenswrapper[4650]: E0201 07:57:54.966645 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:58:04 crc kubenswrapper[4650]: I0201 07:58:04.965142 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:58:04 crc kubenswrapper[4650]: I0201 07:58:04.967297 4650 scope.go:117] "RemoveContainer" 
containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:58:04 crc kubenswrapper[4650]: E0201 07:58:04.967995 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:58:05 crc kubenswrapper[4650]: I0201 07:58:05.965833 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:58:05 crc kubenswrapper[4650]: I0201 07:58:05.966423 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:58:05 crc kubenswrapper[4650]: I0201 07:58:05.966606 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 07:58:05 crc kubenswrapper[4650]: E0201 07:58:05.967316 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:58:16 crc kubenswrapper[4650]: I0201 07:58:16.964837 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:58:16 crc kubenswrapper[4650]: I0201 07:58:16.965375 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:58:16 crc kubenswrapper[4650]: I0201 07:58:16.965459 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 07:58:16 crc kubenswrapper[4650]: I0201 07:58:16.965612 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:58:16 crc kubenswrapper[4650]: I0201 07:58:16.965652 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:58:16 crc kubenswrapper[4650]: E0201 07:58:16.965728 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed 
to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:58:16 crc kubenswrapper[4650]: E0201 07:58:16.966133 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:58:29 crc kubenswrapper[4650]: I0201 07:58:29.965282 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:58:29 crc kubenswrapper[4650]: I0201 07:58:29.965769 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:58:29 crc kubenswrapper[4650]: I0201 07:58:29.965853 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 07:58:29 crc kubenswrapper[4650]: E0201 07:58:29.966139 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:58:30 crc kubenswrapper[4650]: I0201 07:58:30.965918 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:58:30 crc kubenswrapper[4650]: I0201 07:58:30.966289 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:58:30 crc kubenswrapper[4650]: E0201 07:58:30.966571 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:58:41 crc kubenswrapper[4650]: I0201 07:58:41.976685 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:58:41 crc kubenswrapper[4650]: I0201 07:58:41.977518 4650 
scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:58:41 crc kubenswrapper[4650]: E0201 07:58:41.978101 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:58:42 crc kubenswrapper[4650]: I0201 07:58:42.966967 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:58:42 crc kubenswrapper[4650]: I0201 07:58:42.967456 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:58:42 crc kubenswrapper[4650]: I0201 07:58:42.967777 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 07:58:42 crc kubenswrapper[4650]: E0201 07:58:42.968497 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:58:49 crc kubenswrapper[4650]: I0201 07:58:49.021245 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:58:49 crc kubenswrapper[4650]: E0201 07:58:49.021483 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 07:58:49 crc kubenswrapper[4650]: E0201 07:58:49.022081 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 08:00:51.022052018 +0000 UTC m=+2249.745150293 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 07:58:52 crc kubenswrapper[4650]: I0201 07:58:52.965992 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:58:52 crc kubenswrapper[4650]: I0201 07:58:52.966831 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:58:53 crc kubenswrapper[4650]: E0201 07:58:53.222910 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:58:53 crc kubenswrapper[4650]: I0201 07:58:53.453271 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b"} Feb 01 07:58:53 crc kubenswrapper[4650]: I0201 07:58:53.453594 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:58:53 crc kubenswrapper[4650]: I0201 07:58:53.453847 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:58:53 crc kubenswrapper[4650]: E0201 07:58:53.455480 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:58:54 crc kubenswrapper[4650]: I0201 07:58:54.470163 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" exitCode=1 Feb 01 07:58:54 crc kubenswrapper[4650]: I0201 07:58:54.470223 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b"} Feb 01 07:58:54 crc kubenswrapper[4650]: I0201 07:58:54.470275 4650 scope.go:117] "RemoveContainer" containerID="637de2e4d3aba39f71c183f3f14cbafd5b654327949f443342775429e1a08c7a" Feb 01 07:58:54 crc kubenswrapper[4650]: I0201 07:58:54.471493 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:58:54 crc kubenswrapper[4650]: I0201 07:58:54.471554 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 07:58:54 crc kubenswrapper[4650]: E0201 07:58:54.472226 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd 
pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:58:54 crc kubenswrapper[4650]: I0201 07:58:54.800313 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 07:58:55 crc kubenswrapper[4650]: I0201 07:58:55.486342 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:58:55 crc kubenswrapper[4650]: I0201 07:58:55.486382 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 07:58:55 crc kubenswrapper[4650]: E0201 07:58:55.486878 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:58:56 crc kubenswrapper[4650]: E0201 07:58:56.161300 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 07:58:56 crc kubenswrapper[4650]: I0201 07:58:56.492966 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 07:58:56 crc kubenswrapper[4650]: I0201 07:58:56.493632 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:58:56 crc kubenswrapper[4650]: I0201 07:58:56.493656 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 07:58:56 crc kubenswrapper[4650]: E0201 07:58:56.494007 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:58:56 crc kubenswrapper[4650]: I0201 07:58:56.965301 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:58:56 crc kubenswrapper[4650]: I0201 07:58:56.965371 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:58:56 crc kubenswrapper[4650]: I0201 07:58:56.965458 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 07:58:56 crc kubenswrapper[4650]: E0201 07:58:56.965729 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:59:07 crc kubenswrapper[4650]: I0201 07:59:07.161243 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:59:07 crc kubenswrapper[4650]: I0201 07:59:07.162174 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:59:08 crc kubenswrapper[4650]: I0201 07:59:08.965139 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:59:08 crc kubenswrapper[4650]: I0201 07:59:08.966166 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 
01 07:59:08 crc kubenswrapper[4650]: E0201 07:59:08.966446 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:59:09 crc kubenswrapper[4650]: I0201 07:59:09.966185 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:59:09 crc kubenswrapper[4650]: I0201 07:59:09.966329 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:59:09 crc kubenswrapper[4650]: I0201 07:59:09.966564 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 07:59:09 crc kubenswrapper[4650]: E0201 07:59:09.967164 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:59:20 crc kubenswrapper[4650]: I0201 07:59:20.965682 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:59:20 crc kubenswrapper[4650]: I0201 07:59:20.966312 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 07:59:20 crc kubenswrapper[4650]: E0201 07:59:20.966643 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:59:22 crc kubenswrapper[4650]: I0201 07:59:22.728078 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" exitCode=1 Feb 01 07:59:22 crc kubenswrapper[4650]: I0201 07:59:22.728136 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd"} Feb 01 07:59:22 crc kubenswrapper[4650]: I0201 07:59:22.728474 4650 scope.go:117] "RemoveContainer" containerID="8898426fa76236c2e570630635ac922875a39a37f68525ffa67c42004070d22e" Feb 01 07:59:22 crc kubenswrapper[4650]: I0201 07:59:22.729289 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:59:22 crc kubenswrapper[4650]: I0201 07:59:22.729369 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:59:22 crc kubenswrapper[4650]: I0201 07:59:22.729398 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 07:59:22 crc kubenswrapper[4650]: I0201 07:59:22.729561 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 07:59:22 crc kubenswrapper[4650]: E0201 07:59:22.730662 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:59:35 crc kubenswrapper[4650]: I0201 07:59:35.965381 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:59:35 crc kubenswrapper[4650]: I0201 07:59:35.966059 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 07:59:35 crc kubenswrapper[4650]: E0201 07:59:35.966359 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 07:59:36 crc kubenswrapper[4650]: I0201 07:59:36.966352 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:59:36 crc kubenswrapper[4650]: I0201 07:59:36.966807 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:59:36 crc kubenswrapper[4650]: I0201 07:59:36.966840 4650 scope.go:117] "RemoveContainer" 
containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 07:59:36 crc kubenswrapper[4650]: I0201 07:59:36.966921 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 07:59:36 crc kubenswrapper[4650]: E0201 07:59:36.967428 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:59:37 crc kubenswrapper[4650]: I0201 07:59:37.160741 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 07:59:37 crc kubenswrapper[4650]: I0201 07:59:37.160811 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 07:59:48 crc kubenswrapper[4650]: I0201 07:59:48.967480 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 07:59:48 crc kubenswrapper[4650]: I0201 07:59:48.968416 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 07:59:48 crc kubenswrapper[4650]: I0201 07:59:48.968465 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 07:59:48 crc kubenswrapper[4650]: I0201 07:59:48.968591 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 07:59:48 crc kubenswrapper[4650]: E0201 07:59:48.969342 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to 
\"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 07:59:50 crc kubenswrapper[4650]: I0201 07:59:50.965680 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 07:59:50 crc kubenswrapper[4650]: I0201 07:59:50.966135 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 07:59:50 crc kubenswrapper[4650]: E0201 07:59:50.966435 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.157814 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6"] Feb 01 08:00:00 crc kubenswrapper[4650]: E0201 08:00:00.158681 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a784dfb4-637a-4eeb-8db2-850bc25a72db" containerName="registry-server" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.158694 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a784dfb4-637a-4eeb-8db2-850bc25a72db" containerName="registry-server" Feb 01 08:00:00 crc kubenswrapper[4650]: E0201 08:00:00.158715 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a784dfb4-637a-4eeb-8db2-850bc25a72db" containerName="extract-utilities" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.158722 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a784dfb4-637a-4eeb-8db2-850bc25a72db" containerName="extract-utilities" Feb 01 08:00:00 crc kubenswrapper[4650]: E0201 08:00:00.158733 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5538e2bb-a1bb-483c-a941-8e0b7074033e" containerName="extract-utilities" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.158739 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="5538e2bb-a1bb-483c-a941-8e0b7074033e" containerName="extract-utilities" Feb 01 08:00:00 crc kubenswrapper[4650]: E0201 08:00:00.158750 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a784dfb4-637a-4eeb-8db2-850bc25a72db" containerName="extract-content" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.158755 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a784dfb4-637a-4eeb-8db2-850bc25a72db" containerName="extract-content" Feb 01 08:00:00 crc kubenswrapper[4650]: E0201 08:00:00.158763 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5538e2bb-a1bb-483c-a941-8e0b7074033e" containerName="registry-server" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.158769 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="5538e2bb-a1bb-483c-a941-8e0b7074033e" containerName="registry-server" Feb 01 08:00:00 crc kubenswrapper[4650]: E0201 08:00:00.158785 4650 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="5538e2bb-a1bb-483c-a941-8e0b7074033e" containerName="extract-content" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.158793 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="5538e2bb-a1bb-483c-a941-8e0b7074033e" containerName="extract-content" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.158986 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="a784dfb4-637a-4eeb-8db2-850bc25a72db" containerName="registry-server" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.159010 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="5538e2bb-a1bb-483c-a941-8e0b7074033e" containerName="registry-server" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.159634 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.162502 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.163787 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.172789 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6"] Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.247801 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6cdab4dc-8453-4157-b177-7c1e9e435992-secret-volume\") pod \"collect-profiles-29498880-xxwl6\" (UID: \"6cdab4dc-8453-4157-b177-7c1e9e435992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.248134 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6cdab4dc-8453-4157-b177-7c1e9e435992-config-volume\") pod \"collect-profiles-29498880-xxwl6\" (UID: \"6cdab4dc-8453-4157-b177-7c1e9e435992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.248257 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kzjv\" (UniqueName: \"kubernetes.io/projected/6cdab4dc-8453-4157-b177-7c1e9e435992-kube-api-access-6kzjv\") pod \"collect-profiles-29498880-xxwl6\" (UID: \"6cdab4dc-8453-4157-b177-7c1e9e435992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.349963 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kzjv\" (UniqueName: \"kubernetes.io/projected/6cdab4dc-8453-4157-b177-7c1e9e435992-kube-api-access-6kzjv\") pod \"collect-profiles-29498880-xxwl6\" (UID: \"6cdab4dc-8453-4157-b177-7c1e9e435992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.350105 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6cdab4dc-8453-4157-b177-7c1e9e435992-secret-volume\") pod 
\"collect-profiles-29498880-xxwl6\" (UID: \"6cdab4dc-8453-4157-b177-7c1e9e435992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.350174 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6cdab4dc-8453-4157-b177-7c1e9e435992-config-volume\") pod \"collect-profiles-29498880-xxwl6\" (UID: \"6cdab4dc-8453-4157-b177-7c1e9e435992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.350979 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6cdab4dc-8453-4157-b177-7c1e9e435992-config-volume\") pod \"collect-profiles-29498880-xxwl6\" (UID: \"6cdab4dc-8453-4157-b177-7c1e9e435992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.361937 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6cdab4dc-8453-4157-b177-7c1e9e435992-secret-volume\") pod \"collect-profiles-29498880-xxwl6\" (UID: \"6cdab4dc-8453-4157-b177-7c1e9e435992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.370523 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kzjv\" (UniqueName: \"kubernetes.io/projected/6cdab4dc-8453-4157-b177-7c1e9e435992-kube-api-access-6kzjv\") pod \"collect-profiles-29498880-xxwl6\" (UID: \"6cdab4dc-8453-4157-b177-7c1e9e435992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.480795 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" Feb 01 08:00:00 crc kubenswrapper[4650]: I0201 08:00:00.969888 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6"] Feb 01 08:00:01 crc kubenswrapper[4650]: I0201 08:00:01.092179 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" event={"ID":"6cdab4dc-8453-4157-b177-7c1e9e435992","Type":"ContainerStarted","Data":"49327be30337f2cd5b44c044e1a8ced3cd4841a587dfa22db53216d8c43f4bdd"} Feb 01 08:00:01 crc kubenswrapper[4650]: I0201 08:00:01.978179 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 08:00:01 crc kubenswrapper[4650]: I0201 08:00:01.978543 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:00:01 crc kubenswrapper[4650]: E0201 08:00:01.979749 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:00:02 crc kubenswrapper[4650]: I0201 08:00:02.120464 4650 generic.go:334] "Generic (PLEG): container finished" podID="6cdab4dc-8453-4157-b177-7c1e9e435992" containerID="828b46a55cf026633e5db5d7c791b4129e8fcddc2d97a401fb316433a12151f0" exitCode=0 Feb 01 08:00:02 crc kubenswrapper[4650]: I0201 08:00:02.120516 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" event={"ID":"6cdab4dc-8453-4157-b177-7c1e9e435992","Type":"ContainerDied","Data":"828b46a55cf026633e5db5d7c791b4129e8fcddc2d97a401fb316433a12151f0"} Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.486655 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.620154 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6kzjv\" (UniqueName: \"kubernetes.io/projected/6cdab4dc-8453-4157-b177-7c1e9e435992-kube-api-access-6kzjv\") pod \"6cdab4dc-8453-4157-b177-7c1e9e435992\" (UID: \"6cdab4dc-8453-4157-b177-7c1e9e435992\") " Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.620386 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6cdab4dc-8453-4157-b177-7c1e9e435992-secret-volume\") pod \"6cdab4dc-8453-4157-b177-7c1e9e435992\" (UID: \"6cdab4dc-8453-4157-b177-7c1e9e435992\") " Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.620447 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6cdab4dc-8453-4157-b177-7c1e9e435992-config-volume\") pod \"6cdab4dc-8453-4157-b177-7c1e9e435992\" (UID: \"6cdab4dc-8453-4157-b177-7c1e9e435992\") " Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.621733 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cdab4dc-8453-4157-b177-7c1e9e435992-config-volume" (OuterVolumeSpecName: "config-volume") pod "6cdab4dc-8453-4157-b177-7c1e9e435992" (UID: "6cdab4dc-8453-4157-b177-7c1e9e435992"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.627528 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cdab4dc-8453-4157-b177-7c1e9e435992-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6cdab4dc-8453-4157-b177-7c1e9e435992" (UID: "6cdab4dc-8453-4157-b177-7c1e9e435992"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.628206 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cdab4dc-8453-4157-b177-7c1e9e435992-kube-api-access-6kzjv" (OuterVolumeSpecName: "kube-api-access-6kzjv") pod "6cdab4dc-8453-4157-b177-7c1e9e435992" (UID: "6cdab4dc-8453-4157-b177-7c1e9e435992"). InnerVolumeSpecName "kube-api-access-6kzjv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.723009 4650 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6cdab4dc-8453-4157-b177-7c1e9e435992-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.723068 4650 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6cdab4dc-8453-4157-b177-7c1e9e435992-config-volume\") on node \"crc\" DevicePath \"\"" Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.723079 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6kzjv\" (UniqueName: \"kubernetes.io/projected/6cdab4dc-8453-4157-b177-7c1e9e435992-kube-api-access-6kzjv\") on node \"crc\" DevicePath \"\"" Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.966157 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.966583 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.966621 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 08:00:03 crc kubenswrapper[4650]: I0201 08:00:03.966718 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 08:00:03 crc kubenswrapper[4650]: E0201 08:00:03.967261 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:00:04 crc kubenswrapper[4650]: I0201 08:00:04.141312 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" event={"ID":"6cdab4dc-8453-4157-b177-7c1e9e435992","Type":"ContainerDied","Data":"49327be30337f2cd5b44c044e1a8ced3cd4841a587dfa22db53216d8c43f4bdd"} Feb 01 08:00:04 crc kubenswrapper[4650]: I0201 08:00:04.141346 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49327be30337f2cd5b44c044e1a8ced3cd4841a587dfa22db53216d8c43f4bdd" Feb 01 08:00:04 crc kubenswrapper[4650]: I0201 08:00:04.141400 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498880-xxwl6" Feb 01 08:00:04 crc kubenswrapper[4650]: I0201 08:00:04.579966 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh"] Feb 01 08:00:04 crc kubenswrapper[4650]: I0201 08:00:04.586538 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498835-4crxh"] Feb 01 08:00:05 crc kubenswrapper[4650]: I0201 08:00:05.980629 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a139c37-a580-476f-a35b-e5daba038dbc" path="/var/lib/kubelet/pods/2a139c37-a580-476f-a35b-e5daba038dbc/volumes" Feb 01 08:00:07 crc kubenswrapper[4650]: I0201 08:00:07.161602 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 08:00:07 crc kubenswrapper[4650]: I0201 08:00:07.161893 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 08:00:07 crc kubenswrapper[4650]: I0201 08:00:07.161936 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 08:00:07 crc kubenswrapper[4650]: I0201 08:00:07.162670 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f"} pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 01 08:00:07 crc kubenswrapper[4650]: I0201 08:00:07.162731 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" containerID="cri-o://a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" gracePeriod=600 Feb 01 08:00:07 crc kubenswrapper[4650]: E0201 08:00:07.311408 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:00:08 crc kubenswrapper[4650]: I0201 08:00:08.181162 4650 generic.go:334] "Generic (PLEG): container finished" podID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" exitCode=0 Feb 01 08:00:08 crc kubenswrapper[4650]: I0201 08:00:08.181230 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" 
event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerDied","Data":"a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f"} Feb 01 08:00:08 crc kubenswrapper[4650]: I0201 08:00:08.182074 4650 scope.go:117] "RemoveContainer" containerID="a1f111be91cc1e9fcafc3706031d6b3031dfcc3555d9fa4700a7b38ad3a07c2d" Feb 01 08:00:08 crc kubenswrapper[4650]: I0201 08:00:08.182693 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:00:08 crc kubenswrapper[4650]: E0201 08:00:08.182932 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:00:13 crc kubenswrapper[4650]: I0201 08:00:13.966136 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 08:00:13 crc kubenswrapper[4650]: I0201 08:00:13.966813 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:00:13 crc kubenswrapper[4650]: E0201 08:00:13.967335 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:00:16 crc kubenswrapper[4650]: I0201 08:00:16.965541 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 08:00:16 crc kubenswrapper[4650]: I0201 08:00:16.966150 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 08:00:16 crc kubenswrapper[4650]: I0201 08:00:16.966174 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 08:00:16 crc kubenswrapper[4650]: I0201 08:00:16.966230 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 08:00:16 crc kubenswrapper[4650]: E0201 08:00:16.966529 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:00:22 crc kubenswrapper[4650]: I0201 08:00:22.966657 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:00:22 crc kubenswrapper[4650]: E0201 08:00:22.968817 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:00:23 crc kubenswrapper[4650]: I0201 08:00:23.332972 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="b97280e0ed4d76c45bcae2e71d0aaff3b46058139a4ac3291e91cfe7220e1000" exitCode=1 Feb 01 08:00:23 crc kubenswrapper[4650]: I0201 08:00:23.333050 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"b97280e0ed4d76c45bcae2e71d0aaff3b46058139a4ac3291e91cfe7220e1000"} Feb 01 08:00:23 crc kubenswrapper[4650]: I0201 08:00:23.333097 4650 scope.go:117] "RemoveContainer" containerID="4b33320c9f35a50c528cb00327a30a15dbb184f32e6aea5d10e70ce7875241d7" Feb 01 08:00:23 crc kubenswrapper[4650]: I0201 08:00:23.334211 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 08:00:23 crc kubenswrapper[4650]: I0201 08:00:23.334299 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 08:00:23 crc kubenswrapper[4650]: I0201 08:00:23.334338 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 08:00:23 crc kubenswrapper[4650]: I0201 08:00:23.334417 4650 scope.go:117] "RemoveContainer" containerID="b97280e0ed4d76c45bcae2e71d0aaff3b46058139a4ac3291e91cfe7220e1000" Feb 01 08:00:23 crc kubenswrapper[4650]: I0201 08:00:23.334446 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 08:00:23 crc kubenswrapper[4650]: E0201 08:00:23.335212 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:00:26 crc kubenswrapper[4650]: I0201 08:00:26.966117 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 08:00:26 crc kubenswrapper[4650]: I0201 08:00:26.966693 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:00:26 crc kubenswrapper[4650]: E0201 08:00:26.966895 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:00:27 crc kubenswrapper[4650]: I0201 08:00:27.390254 4650 scope.go:117] "RemoveContainer" containerID="2e9cb87a68209a8c637af3e8b5ed2226512a463b4820c8374a6ceafdb7a4880f" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.381969 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dwprt"] Feb 01 08:00:31 crc kubenswrapper[4650]: E0201 08:00:31.385261 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cdab4dc-8453-4157-b177-7c1e9e435992" containerName="collect-profiles" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.385412 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cdab4dc-8453-4157-b177-7c1e9e435992" containerName="collect-profiles" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.385765 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cdab4dc-8453-4157-b177-7c1e9e435992" containerName="collect-profiles" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.387535 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.407820 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dwprt"] Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.449480 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ced033e2-978b-4840-8f84-77c75e881b8b-catalog-content\") pod \"redhat-operators-dwprt\" (UID: \"ced033e2-978b-4840-8f84-77c75e881b8b\") " pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.449817 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ced033e2-978b-4840-8f84-77c75e881b8b-utilities\") pod \"redhat-operators-dwprt\" (UID: \"ced033e2-978b-4840-8f84-77c75e881b8b\") " pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.449968 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfnv7\" (UniqueName: \"kubernetes.io/projected/ced033e2-978b-4840-8f84-77c75e881b8b-kube-api-access-jfnv7\") pod \"redhat-operators-dwprt\" (UID: \"ced033e2-978b-4840-8f84-77c75e881b8b\") " pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.551434 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ced033e2-978b-4840-8f84-77c75e881b8b-utilities\") pod \"redhat-operators-dwprt\" (UID: \"ced033e2-978b-4840-8f84-77c75e881b8b\") " pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.551506 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfnv7\" (UniqueName: \"kubernetes.io/projected/ced033e2-978b-4840-8f84-77c75e881b8b-kube-api-access-jfnv7\") pod \"redhat-operators-dwprt\" (UID: \"ced033e2-978b-4840-8f84-77c75e881b8b\") " pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.551999 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ced033e2-978b-4840-8f84-77c75e881b8b-catalog-content\") pod \"redhat-operators-dwprt\" (UID: \"ced033e2-978b-4840-8f84-77c75e881b8b\") " pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.552065 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ced033e2-978b-4840-8f84-77c75e881b8b-utilities\") pod \"redhat-operators-dwprt\" (UID: \"ced033e2-978b-4840-8f84-77c75e881b8b\") " pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.552359 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ced033e2-978b-4840-8f84-77c75e881b8b-catalog-content\") pod \"redhat-operators-dwprt\" (UID: \"ced033e2-978b-4840-8f84-77c75e881b8b\") " pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.577761 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-jfnv7\" (UniqueName: \"kubernetes.io/projected/ced033e2-978b-4840-8f84-77c75e881b8b-kube-api-access-jfnv7\") pod \"redhat-operators-dwprt\" (UID: \"ced033e2-978b-4840-8f84-77c75e881b8b\") " pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:00:31 crc kubenswrapper[4650]: I0201 08:00:31.732354 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:00:32 crc kubenswrapper[4650]: I0201 08:00:32.062323 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dwprt"] Feb 01 08:00:32 crc kubenswrapper[4650]: I0201 08:00:32.454489 4650 generic.go:334] "Generic (PLEG): container finished" podID="ced033e2-978b-4840-8f84-77c75e881b8b" containerID="75ca309a14e54883e468c04fcac00eb5b3f4dc5a893c572c2766a5851da0469c" exitCode=0 Feb 01 08:00:32 crc kubenswrapper[4650]: I0201 08:00:32.454696 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwprt" event={"ID":"ced033e2-978b-4840-8f84-77c75e881b8b","Type":"ContainerDied","Data":"75ca309a14e54883e468c04fcac00eb5b3f4dc5a893c572c2766a5851da0469c"} Feb 01 08:00:32 crc kubenswrapper[4650]: I0201 08:00:32.454752 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwprt" event={"ID":"ced033e2-978b-4840-8f84-77c75e881b8b","Type":"ContainerStarted","Data":"05b4384ddde8693c0dd71c1b45d3b2c330ae4d7d99e237cccbf15e4b47a7d3ff"} Feb 01 08:00:32 crc kubenswrapper[4650]: I0201 08:00:32.456386 4650 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 01 08:00:33 crc kubenswrapper[4650]: I0201 08:00:33.463278 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwprt" event={"ID":"ced033e2-978b-4840-8f84-77c75e881b8b","Type":"ContainerStarted","Data":"7667878ccb43d961a1a0f783415e15833bb14b7583e282c24f53d0ff779d3320"} Feb 01 08:00:34 crc kubenswrapper[4650]: I0201 08:00:34.793842 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bzmj5"] Feb 01 08:00:34 crc kubenswrapper[4650]: I0201 08:00:34.795744 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:34 crc kubenswrapper[4650]: I0201 08:00:34.810861 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bzmj5"] Feb 01 08:00:34 crc kubenswrapper[4650]: I0201 08:00:34.870791 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1884690-42b3-4207-9ea0-5d62db5eb3fb-catalog-content\") pod \"certified-operators-bzmj5\" (UID: \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\") " pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:34 crc kubenswrapper[4650]: I0201 08:00:34.870860 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1884690-42b3-4207-9ea0-5d62db5eb3fb-utilities\") pod \"certified-operators-bzmj5\" (UID: \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\") " pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:34 crc kubenswrapper[4650]: I0201 08:00:34.870977 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-677sd\" (UniqueName: \"kubernetes.io/projected/a1884690-42b3-4207-9ea0-5d62db5eb3fb-kube-api-access-677sd\") pod \"certified-operators-bzmj5\" (UID: \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\") " pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:34 crc kubenswrapper[4650]: I0201 08:00:34.972163 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-677sd\" (UniqueName: \"kubernetes.io/projected/a1884690-42b3-4207-9ea0-5d62db5eb3fb-kube-api-access-677sd\") pod \"certified-operators-bzmj5\" (UID: \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\") " pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:34 crc kubenswrapper[4650]: I0201 08:00:34.972264 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1884690-42b3-4207-9ea0-5d62db5eb3fb-catalog-content\") pod \"certified-operators-bzmj5\" (UID: \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\") " pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:34 crc kubenswrapper[4650]: I0201 08:00:34.973012 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1884690-42b3-4207-9ea0-5d62db5eb3fb-catalog-content\") pod \"certified-operators-bzmj5\" (UID: \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\") " pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:34 crc kubenswrapper[4650]: I0201 08:00:34.973072 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1884690-42b3-4207-9ea0-5d62db5eb3fb-utilities\") pod \"certified-operators-bzmj5\" (UID: \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\") " pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:34 crc kubenswrapper[4650]: I0201 08:00:34.973090 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1884690-42b3-4207-9ea0-5d62db5eb3fb-utilities\") pod \"certified-operators-bzmj5\" (UID: \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\") " pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:34 crc kubenswrapper[4650]: I0201 08:00:34.990143 4650 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-677sd\" (UniqueName: \"kubernetes.io/projected/a1884690-42b3-4207-9ea0-5d62db5eb3fb-kube-api-access-677sd\") pod \"certified-operators-bzmj5\" (UID: \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\") " pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:35 crc kubenswrapper[4650]: I0201 08:00:35.113926 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:35 crc kubenswrapper[4650]: I0201 08:00:35.719191 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bzmj5"] Feb 01 08:00:35 crc kubenswrapper[4650]: I0201 08:00:35.965224 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 08:00:35 crc kubenswrapper[4650]: I0201 08:00:35.965309 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 08:00:35 crc kubenswrapper[4650]: I0201 08:00:35.965337 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 08:00:35 crc kubenswrapper[4650]: I0201 08:00:35.965395 4650 scope.go:117] "RemoveContainer" containerID="b97280e0ed4d76c45bcae2e71d0aaff3b46058139a4ac3291e91cfe7220e1000" Feb 01 08:00:35 crc kubenswrapper[4650]: I0201 08:00:35.965405 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 08:00:35 crc kubenswrapper[4650]: I0201 08:00:35.965629 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:00:35 crc kubenswrapper[4650]: E0201 08:00:35.965807 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:00:35 crc kubenswrapper[4650]: E0201 08:00:35.965879 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:00:36 crc kubenswrapper[4650]: I0201 
08:00:36.485704 4650 generic.go:334] "Generic (PLEG): container finished" podID="a1884690-42b3-4207-9ea0-5d62db5eb3fb" containerID="936ba1940fac0294c997d73887d0c6139cfa83b70cff0f71046681b7b61163d4" exitCode=0 Feb 01 08:00:36 crc kubenswrapper[4650]: I0201 08:00:36.485987 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bzmj5" event={"ID":"a1884690-42b3-4207-9ea0-5d62db5eb3fb","Type":"ContainerDied","Data":"936ba1940fac0294c997d73887d0c6139cfa83b70cff0f71046681b7b61163d4"} Feb 01 08:00:36 crc kubenswrapper[4650]: I0201 08:00:36.486015 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bzmj5" event={"ID":"a1884690-42b3-4207-9ea0-5d62db5eb3fb","Type":"ContainerStarted","Data":"2b7e86d038a169a31201d76ac152b5d8152e8ff6151fb8b89da7ac8c8d2bc36d"} Feb 01 08:00:37 crc kubenswrapper[4650]: I0201 08:00:37.501270 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bzmj5" event={"ID":"a1884690-42b3-4207-9ea0-5d62db5eb3fb","Type":"ContainerStarted","Data":"a55eaad408995a8786fe0778503f614bac10d9eb61d201b625de9c8a55026b16"} Feb 01 08:00:38 crc kubenswrapper[4650]: I0201 08:00:38.965266 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 08:00:38 crc kubenswrapper[4650]: I0201 08:00:38.965673 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:00:39 crc kubenswrapper[4650]: E0201 08:00:39.252990 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:00:39 crc kubenswrapper[4650]: I0201 08:00:39.526851 4650 generic.go:334] "Generic (PLEG): container finished" podID="ced033e2-978b-4840-8f84-77c75e881b8b" containerID="7667878ccb43d961a1a0f783415e15833bb14b7583e282c24f53d0ff779d3320" exitCode=0 Feb 01 08:00:39 crc kubenswrapper[4650]: I0201 08:00:39.526923 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwprt" event={"ID":"ced033e2-978b-4840-8f84-77c75e881b8b","Type":"ContainerDied","Data":"7667878ccb43d961a1a0f783415e15833bb14b7583e282c24f53d0ff779d3320"} Feb 01 08:00:39 crc kubenswrapper[4650]: I0201 08:00:39.534010 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"42aff4c8181ab854febff687de14b47c444c56c4130c5fa1af7c19fe0a715291"} Feb 01 08:00:39 crc kubenswrapper[4650]: I0201 08:00:39.534309 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:00:39 crc kubenswrapper[4650]: I0201 08:00:39.534755 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:00:39 crc kubenswrapper[4650]: E0201 08:00:39.535053 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" 
pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:00:40 crc kubenswrapper[4650]: I0201 08:00:40.543065 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwprt" event={"ID":"ced033e2-978b-4840-8f84-77c75e881b8b","Type":"ContainerStarted","Data":"468c5be727f71475411637592460c3c2359c4277658d60e27d0d578213e0f138"} Feb 01 08:00:40 crc kubenswrapper[4650]: I0201 08:00:40.546527 4650 generic.go:334] "Generic (PLEG): container finished" podID="a1884690-42b3-4207-9ea0-5d62db5eb3fb" containerID="a55eaad408995a8786fe0778503f614bac10d9eb61d201b625de9c8a55026b16" exitCode=0 Feb 01 08:00:40 crc kubenswrapper[4650]: I0201 08:00:40.546593 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bzmj5" event={"ID":"a1884690-42b3-4207-9ea0-5d62db5eb3fb","Type":"ContainerDied","Data":"a55eaad408995a8786fe0778503f614bac10d9eb61d201b625de9c8a55026b16"} Feb 01 08:00:40 crc kubenswrapper[4650]: I0201 08:00:40.547146 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:00:40 crc kubenswrapper[4650]: E0201 08:00:40.547386 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:00:40 crc kubenswrapper[4650]: I0201 08:00:40.618882 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dwprt" podStartSLOduration=2.092954386 podStartE2EDuration="9.618857254s" podCreationTimestamp="2026-02-01 08:00:31 +0000 UTC" firstStartedPulling="2026-02-01 08:00:32.456206858 +0000 UTC m=+2231.179305103" lastFinishedPulling="2026-02-01 08:00:39.982109716 +0000 UTC m=+2238.705207971" observedRunningTime="2026-02-01 08:00:40.613602205 +0000 UTC m=+2239.336700470" watchObservedRunningTime="2026-02-01 08:00:40.618857254 +0000 UTC m=+2239.341955509" Feb 01 08:00:41 crc kubenswrapper[4650]: I0201 08:00:41.556882 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bzmj5" event={"ID":"a1884690-42b3-4207-9ea0-5d62db5eb3fb","Type":"ContainerStarted","Data":"a4651c068c2c4318a852911bdf8f728d347dc946b7249fa459a4ccc70e7eea3a"} Feb 01 08:00:41 crc kubenswrapper[4650]: I0201 08:00:41.586427 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bzmj5" podStartSLOduration=3.144933209 podStartE2EDuration="7.58640772s" podCreationTimestamp="2026-02-01 08:00:34 +0000 UTC" firstStartedPulling="2026-02-01 08:00:36.487865724 +0000 UTC m=+2235.210963969" lastFinishedPulling="2026-02-01 08:00:40.929340235 +0000 UTC m=+2239.652438480" observedRunningTime="2026-02-01 08:00:41.576316183 +0000 UTC m=+2240.299414438" watchObservedRunningTime="2026-02-01 08:00:41.58640772 +0000 UTC m=+2240.309505985" Feb 01 08:00:41 crc kubenswrapper[4650]: I0201 08:00:41.733625 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:00:41 crc kubenswrapper[4650]: I0201 08:00:41.734796 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:00:42 crc kubenswrapper[4650]: I0201 08:00:42.790340 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dwprt" podUID="ced033e2-978b-4840-8f84-77c75e881b8b" containerName="registry-server" probeResult="failure" output=< Feb 01 08:00:42 crc kubenswrapper[4650]: timeout: failed to connect service ":50051" within 1s Feb 01 08:00:42 crc kubenswrapper[4650]: > Feb 01 08:00:44 crc kubenswrapper[4650]: I0201 08:00:44.813229 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:00:45 crc kubenswrapper[4650]: I0201 08:00:45.115506 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:45 crc kubenswrapper[4650]: I0201 08:00:45.115564 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:45 crc kubenswrapper[4650]: I0201 08:00:45.177976 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:45 crc kubenswrapper[4650]: I0201 08:00:45.804175 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:00:46 crc kubenswrapper[4650]: I0201 08:00:46.965355 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:00:46 crc kubenswrapper[4650]: E0201 08:00:46.966002 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:00:48 crc kubenswrapper[4650]: I0201 08:00:48.814455 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:00:49 crc kubenswrapper[4650]: I0201 08:00:49.808988 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:00:49 crc kubenswrapper[4650]: I0201 08:00:49.966452 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 08:00:49 crc kubenswrapper[4650]: I0201 08:00:49.966544 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 08:00:49 crc kubenswrapper[4650]: I0201 08:00:49.966575 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 08:00:49 crc kubenswrapper[4650]: I0201 08:00:49.966674 4650 
scope.go:117] "RemoveContainer" containerID="b97280e0ed4d76c45bcae2e71d0aaff3b46058139a4ac3291e91cfe7220e1000" Feb 01 08:00:49 crc kubenswrapper[4650]: I0201 08:00:49.966686 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 08:00:49 crc kubenswrapper[4650]: E0201 08:00:49.967175 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:00:51 crc kubenswrapper[4650]: I0201 08:00:51.118912 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:00:51 crc kubenswrapper[4650]: E0201 08:00:51.119057 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 08:00:51 crc kubenswrapper[4650]: E0201 08:00:51.119140 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 08:02:53.119118922 +0000 UTC m=+2371.842217177 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 08:00:51 crc kubenswrapper[4650]: I0201 08:00:51.803741 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:00:51 crc kubenswrapper[4650]: I0201 08:00:51.804272 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:00:51 crc kubenswrapper[4650]: I0201 08:00:51.805207 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"42aff4c8181ab854febff687de14b47c444c56c4130c5fa1af7c19fe0a715291"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 08:00:51 crc kubenswrapper[4650]: I0201 08:00:51.805241 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:00:51 crc kubenswrapper[4650]: I0201 08:00:51.805278 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://42aff4c8181ab854febff687de14b47c444c56c4130c5fa1af7c19fe0a715291" gracePeriod=30 Feb 01 08:00:51 crc kubenswrapper[4650]: I0201 08:00:51.809943 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:00:52 crc kubenswrapper[4650]: E0201 08:00:52.197204 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:00:52 crc kubenswrapper[4650]: I0201 08:00:52.654882 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="42aff4c8181ab854febff687de14b47c444c56c4130c5fa1af7c19fe0a715291" exitCode=0 Feb 01 08:00:52 crc kubenswrapper[4650]: I0201 08:00:52.654929 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"42aff4c8181ab854febff687de14b47c444c56c4130c5fa1af7c19fe0a715291"} Feb 01 08:00:52 crc kubenswrapper[4650]: I0201 08:00:52.654994 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb"} Feb 01 08:00:52 crc kubenswrapper[4650]: I0201 08:00:52.655046 4650 scope.go:117] "RemoveContainer" containerID="ad671d1bd999cf05110551aef111af0f4b7594608d3850b080ee586f041a77d0" Feb 01 08:00:52 crc kubenswrapper[4650]: I0201 
08:00:52.655436 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:00:52 crc kubenswrapper[4650]: I0201 08:00:52.656315 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:00:52 crc kubenswrapper[4650]: E0201 08:00:52.656877 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:00:52 crc kubenswrapper[4650]: I0201 08:00:52.784803 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dwprt" podUID="ced033e2-978b-4840-8f84-77c75e881b8b" containerName="registry-server" probeResult="failure" output=< Feb 01 08:00:52 crc kubenswrapper[4650]: timeout: failed to connect service ":50051" within 1s Feb 01 08:00:52 crc kubenswrapper[4650]: > Feb 01 08:00:53 crc kubenswrapper[4650]: I0201 08:00:53.671604 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:00:53 crc kubenswrapper[4650]: E0201 08:00:53.672010 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:00:55 crc kubenswrapper[4650]: I0201 08:00:55.162882 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:55 crc kubenswrapper[4650]: I0201 08:00:55.205955 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bzmj5"] Feb 01 08:00:55 crc kubenswrapper[4650]: I0201 08:00:55.688392 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bzmj5" podUID="a1884690-42b3-4207-9ea0-5d62db5eb3fb" containerName="registry-server" containerID="cri-o://a4651c068c2c4318a852911bdf8f728d347dc946b7249fa459a4ccc70e7eea3a" gracePeriod=2 Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.218153 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.337793 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1884690-42b3-4207-9ea0-5d62db5eb3fb-utilities\") pod \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\" (UID: \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\") " Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.338293 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-677sd\" (UniqueName: \"kubernetes.io/projected/a1884690-42b3-4207-9ea0-5d62db5eb3fb-kube-api-access-677sd\") pod \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\" (UID: \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\") " Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.338488 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1884690-42b3-4207-9ea0-5d62db5eb3fb-catalog-content\") pod \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\" (UID: \"a1884690-42b3-4207-9ea0-5d62db5eb3fb\") " Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.338828 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1884690-42b3-4207-9ea0-5d62db5eb3fb-utilities" (OuterVolumeSpecName: "utilities") pod "a1884690-42b3-4207-9ea0-5d62db5eb3fb" (UID: "a1884690-42b3-4207-9ea0-5d62db5eb3fb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.339596 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1884690-42b3-4207-9ea0-5d62db5eb3fb-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.345944 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1884690-42b3-4207-9ea0-5d62db5eb3fb-kube-api-access-677sd" (OuterVolumeSpecName: "kube-api-access-677sd") pod "a1884690-42b3-4207-9ea0-5d62db5eb3fb" (UID: "a1884690-42b3-4207-9ea0-5d62db5eb3fb"). InnerVolumeSpecName "kube-api-access-677sd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.414179 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1884690-42b3-4207-9ea0-5d62db5eb3fb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a1884690-42b3-4207-9ea0-5d62db5eb3fb" (UID: "a1884690-42b3-4207-9ea0-5d62db5eb3fb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.440913 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1884690-42b3-4207-9ea0-5d62db5eb3fb-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.441135 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-677sd\" (UniqueName: \"kubernetes.io/projected/a1884690-42b3-4207-9ea0-5d62db5eb3fb-kube-api-access-677sd\") on node \"crc\" DevicePath \"\"" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.702888 4650 generic.go:334] "Generic (PLEG): container finished" podID="a1884690-42b3-4207-9ea0-5d62db5eb3fb" containerID="a4651c068c2c4318a852911bdf8f728d347dc946b7249fa459a4ccc70e7eea3a" exitCode=0 Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.702926 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bzmj5" event={"ID":"a1884690-42b3-4207-9ea0-5d62db5eb3fb","Type":"ContainerDied","Data":"a4651c068c2c4318a852911bdf8f728d347dc946b7249fa459a4ccc70e7eea3a"} Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.702960 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bzmj5" event={"ID":"a1884690-42b3-4207-9ea0-5d62db5eb3fb","Type":"ContainerDied","Data":"2b7e86d038a169a31201d76ac152b5d8152e8ff6151fb8b89da7ac8c8d2bc36d"} Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.702977 4650 scope.go:117] "RemoveContainer" containerID="a4651c068c2c4318a852911bdf8f728d347dc946b7249fa459a4ccc70e7eea3a" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.703114 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bzmj5" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.748790 4650 scope.go:117] "RemoveContainer" containerID="a55eaad408995a8786fe0778503f614bac10d9eb61d201b625de9c8a55026b16" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.749640 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bzmj5"] Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.786545 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bzmj5"] Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.793809 4650 scope.go:117] "RemoveContainer" containerID="936ba1940fac0294c997d73887d0c6139cfa83b70cff0f71046681b7b61163d4" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.835506 4650 scope.go:117] "RemoveContainer" containerID="a4651c068c2c4318a852911bdf8f728d347dc946b7249fa459a4ccc70e7eea3a" Feb 01 08:00:56 crc kubenswrapper[4650]: E0201 08:00:56.837219 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4651c068c2c4318a852911bdf8f728d347dc946b7249fa459a4ccc70e7eea3a\": container with ID starting with a4651c068c2c4318a852911bdf8f728d347dc946b7249fa459a4ccc70e7eea3a not found: ID does not exist" containerID="a4651c068c2c4318a852911bdf8f728d347dc946b7249fa459a4ccc70e7eea3a" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.837258 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4651c068c2c4318a852911bdf8f728d347dc946b7249fa459a4ccc70e7eea3a"} err="failed to get container status \"a4651c068c2c4318a852911bdf8f728d347dc946b7249fa459a4ccc70e7eea3a\": rpc error: code = NotFound desc = could not find container \"a4651c068c2c4318a852911bdf8f728d347dc946b7249fa459a4ccc70e7eea3a\": container with ID starting with a4651c068c2c4318a852911bdf8f728d347dc946b7249fa459a4ccc70e7eea3a not found: ID does not exist" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.837282 4650 scope.go:117] "RemoveContainer" containerID="a55eaad408995a8786fe0778503f614bac10d9eb61d201b625de9c8a55026b16" Feb 01 08:00:56 crc kubenswrapper[4650]: E0201 08:00:56.837637 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a55eaad408995a8786fe0778503f614bac10d9eb61d201b625de9c8a55026b16\": container with ID starting with a55eaad408995a8786fe0778503f614bac10d9eb61d201b625de9c8a55026b16 not found: ID does not exist" containerID="a55eaad408995a8786fe0778503f614bac10d9eb61d201b625de9c8a55026b16" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.837681 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a55eaad408995a8786fe0778503f614bac10d9eb61d201b625de9c8a55026b16"} err="failed to get container status \"a55eaad408995a8786fe0778503f614bac10d9eb61d201b625de9c8a55026b16\": rpc error: code = NotFound desc = could not find container \"a55eaad408995a8786fe0778503f614bac10d9eb61d201b625de9c8a55026b16\": container with ID starting with a55eaad408995a8786fe0778503f614bac10d9eb61d201b625de9c8a55026b16 not found: ID does not exist" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.837708 4650 scope.go:117] "RemoveContainer" containerID="936ba1940fac0294c997d73887d0c6139cfa83b70cff0f71046681b7b61163d4" Feb 01 08:00:56 crc kubenswrapper[4650]: E0201 08:00:56.838623 4650 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"936ba1940fac0294c997d73887d0c6139cfa83b70cff0f71046681b7b61163d4\": container with ID starting with 936ba1940fac0294c997d73887d0c6139cfa83b70cff0f71046681b7b61163d4 not found: ID does not exist" containerID="936ba1940fac0294c997d73887d0c6139cfa83b70cff0f71046681b7b61163d4" Feb 01 08:00:56 crc kubenswrapper[4650]: I0201 08:00:56.838654 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"936ba1940fac0294c997d73887d0c6139cfa83b70cff0f71046681b7b61163d4"} err="failed to get container status \"936ba1940fac0294c997d73887d0c6139cfa83b70cff0f71046681b7b61163d4\": rpc error: code = NotFound desc = could not find container \"936ba1940fac0294c997d73887d0c6139cfa83b70cff0f71046681b7b61163d4\": container with ID starting with 936ba1940fac0294c997d73887d0c6139cfa83b70cff0f71046681b7b61163d4 not found: ID does not exist" Feb 01 08:00:57 crc kubenswrapper[4650]: I0201 08:00:57.806982 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:00:57 crc kubenswrapper[4650]: I0201 08:00:57.989839 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1884690-42b3-4207-9ea0-5d62db5eb3fb" path="/var/lib/kubelet/pods/a1884690-42b3-4207-9ea0-5d62db5eb3fb/volumes" Feb 01 08:00:59 crc kubenswrapper[4650]: E0201 08:00:59.494381 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 08:00:59 crc kubenswrapper[4650]: I0201 08:00:59.729414 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:00:59 crc kubenswrapper[4650]: I0201 08:00:59.810368 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.166357 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29498881-4thgd"] Feb 01 08:01:00 crc kubenswrapper[4650]: E0201 08:01:00.167094 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1884690-42b3-4207-9ea0-5d62db5eb3fb" containerName="registry-server" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.167202 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1884690-42b3-4207-9ea0-5d62db5eb3fb" containerName="registry-server" Feb 01 08:01:00 crc kubenswrapper[4650]: E0201 08:01:00.167283 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1884690-42b3-4207-9ea0-5d62db5eb3fb" containerName="extract-utilities" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.167363 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1884690-42b3-4207-9ea0-5d62db5eb3fb" containerName="extract-utilities" Feb 01 08:01:00 crc kubenswrapper[4650]: E0201 08:01:00.167439 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1884690-42b3-4207-9ea0-5d62db5eb3fb" containerName="extract-content" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.167542 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1884690-42b3-4207-9ea0-5d62db5eb3fb" containerName="extract-content" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.167838 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1884690-42b3-4207-9ea0-5d62db5eb3fb" containerName="registry-server" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.168618 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.191986 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29498881-4thgd"] Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.224072 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pc85r\" (UniqueName: \"kubernetes.io/projected/44e12371-b3b4-4575-8595-6f212ce4cb89-kube-api-access-pc85r\") pod \"keystone-cron-29498881-4thgd\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.224197 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-config-data\") pod \"keystone-cron-29498881-4thgd\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.224236 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-fernet-keys\") pod \"keystone-cron-29498881-4thgd\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.224257 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-combined-ca-bundle\") pod \"keystone-cron-29498881-4thgd\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.326500 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pc85r\" (UniqueName: \"kubernetes.io/projected/44e12371-b3b4-4575-8595-6f212ce4cb89-kube-api-access-pc85r\") pod \"keystone-cron-29498881-4thgd\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.326745 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-config-data\") pod \"keystone-cron-29498881-4thgd\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.326802 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-fernet-keys\") pod \"keystone-cron-29498881-4thgd\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.326831 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-combined-ca-bundle\") pod \"keystone-cron-29498881-4thgd\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.333540 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-fernet-keys\") pod \"keystone-cron-29498881-4thgd\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.335693 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-combined-ca-bundle\") pod \"keystone-cron-29498881-4thgd\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.346736 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-config-data\") pod \"keystone-cron-29498881-4thgd\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.347247 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pc85r\" (UniqueName: \"kubernetes.io/projected/44e12371-b3b4-4575-8595-6f212ce4cb89-kube-api-access-pc85r\") pod \"keystone-cron-29498881-4thgd\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.492301 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.805209 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:01:00 crc kubenswrapper[4650]: I0201 08:01:00.911137 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29498881-4thgd"] Feb 01 08:01:01 crc kubenswrapper[4650]: I0201 08:01:01.751768 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29498881-4thgd" event={"ID":"44e12371-b3b4-4575-8595-6f212ce4cb89","Type":"ContainerStarted","Data":"7afe68de3baa2b957f80dffde3cec4def9e50fcb14bae46b0d3b11764d1d5b85"} Feb 01 08:01:01 crc kubenswrapper[4650]: I0201 08:01:01.752119 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29498881-4thgd" event={"ID":"44e12371-b3b4-4575-8595-6f212ce4cb89","Type":"ContainerStarted","Data":"611635d7557d877a1a36e24d16046d5a638bac0811dec96211ee7faf37a83ed8"} Feb 01 08:01:01 crc kubenswrapper[4650]: I0201 08:01:01.776003 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29498881-4thgd" podStartSLOduration=1.7759878470000001 podStartE2EDuration="1.775987847s" podCreationTimestamp="2026-02-01 08:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 08:01:01.768748176 +0000 UTC m=+2260.491846421" watchObservedRunningTime="2026-02-01 08:01:01.775987847 +0000 UTC m=+2260.499086092" Feb 01 08:01:01 crc kubenswrapper[4650]: I0201 08:01:01.971968 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:01:01 crc kubenswrapper[4650]: E0201 08:01:01.972619 4650 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:01:01 crc kubenswrapper[4650]: I0201 08:01:01.972841 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 08:01:01 crc kubenswrapper[4650]: I0201 08:01:01.972910 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 08:01:01 crc kubenswrapper[4650]: I0201 08:01:01.972931 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 08:01:01 crc kubenswrapper[4650]: I0201 08:01:01.973006 4650 scope.go:117] "RemoveContainer" containerID="b97280e0ed4d76c45bcae2e71d0aaff3b46058139a4ac3291e91cfe7220e1000" Feb 01 08:01:01 crc kubenswrapper[4650]: I0201 08:01:01.973014 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 08:01:01 crc kubenswrapper[4650]: E0201 08:01:01.973461 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:01:02 crc kubenswrapper[4650]: I0201 08:01:02.796350 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dwprt" podUID="ced033e2-978b-4840-8f84-77c75e881b8b" containerName="registry-server" probeResult="failure" output=< Feb 01 08:01:02 crc kubenswrapper[4650]: timeout: failed to connect service ":50051" within 1s Feb 01 08:01:02 crc kubenswrapper[4650]: > Feb 01 08:01:03 crc kubenswrapper[4650]: I0201 08:01:03.807202 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:01:03 crc kubenswrapper[4650]: I0201 08:01:03.808295 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:01:03 crc kubenswrapper[4650]: I0201 08:01:03.809225 4650 kuberuntime_manager.go:1027] "Message for 
Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 08:01:03 crc kubenswrapper[4650]: I0201 08:01:03.809256 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:01:03 crc kubenswrapper[4650]: I0201 08:01:03.809294 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" gracePeriod=30 Feb 01 08:01:03 crc kubenswrapper[4650]: I0201 08:01:03.814540 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.173:8080/healthcheck\": read tcp 10.217.0.2:49680->10.217.0.173:8080: read: connection reset by peer" Feb 01 08:01:03 crc kubenswrapper[4650]: E0201 08:01:03.927902 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:01:04 crc kubenswrapper[4650]: I0201 08:01:04.777273 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" exitCode=0 Feb 01 08:01:04 crc kubenswrapper[4650]: I0201 08:01:04.777399 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb"} Feb 01 08:01:04 crc kubenswrapper[4650]: I0201 08:01:04.777630 4650 scope.go:117] "RemoveContainer" containerID="42aff4c8181ab854febff687de14b47c444c56c4130c5fa1af7c19fe0a715291" Feb 01 08:01:04 crc kubenswrapper[4650]: I0201 08:01:04.778757 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:01:04 crc kubenswrapper[4650]: I0201 08:01:04.778815 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:01:04 crc kubenswrapper[4650]: E0201 08:01:04.779248 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" 
pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:01:04 crc kubenswrapper[4650]: I0201 08:01:04.781539 4650 generic.go:334] "Generic (PLEG): container finished" podID="44e12371-b3b4-4575-8595-6f212ce4cb89" containerID="7afe68de3baa2b957f80dffde3cec4def9e50fcb14bae46b0d3b11764d1d5b85" exitCode=0 Feb 01 08:01:04 crc kubenswrapper[4650]: I0201 08:01:04.781582 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29498881-4thgd" event={"ID":"44e12371-b3b4-4575-8595-6f212ce4cb89","Type":"ContainerDied","Data":"7afe68de3baa2b957f80dffde3cec4def9e50fcb14bae46b0d3b11764d1d5b85"} Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.141335 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.271734 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-combined-ca-bundle\") pod \"44e12371-b3b4-4575-8595-6f212ce4cb89\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.271841 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-fernet-keys\") pod \"44e12371-b3b4-4575-8595-6f212ce4cb89\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.271896 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-config-data\") pod \"44e12371-b3b4-4575-8595-6f212ce4cb89\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.271950 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pc85r\" (UniqueName: \"kubernetes.io/projected/44e12371-b3b4-4575-8595-6f212ce4cb89-kube-api-access-pc85r\") pod \"44e12371-b3b4-4575-8595-6f212ce4cb89\" (UID: \"44e12371-b3b4-4575-8595-6f212ce4cb89\") " Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.278812 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "44e12371-b3b4-4575-8595-6f212ce4cb89" (UID: "44e12371-b3b4-4575-8595-6f212ce4cb89"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.295848 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44e12371-b3b4-4575-8595-6f212ce4cb89-kube-api-access-pc85r" (OuterVolumeSpecName: "kube-api-access-pc85r") pod "44e12371-b3b4-4575-8595-6f212ce4cb89" (UID: "44e12371-b3b4-4575-8595-6f212ce4cb89"). InnerVolumeSpecName "kube-api-access-pc85r". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.304630 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "44e12371-b3b4-4575-8595-6f212ce4cb89" (UID: "44e12371-b3b4-4575-8595-6f212ce4cb89"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.341173 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-config-data" (OuterVolumeSpecName: "config-data") pod "44e12371-b3b4-4575-8595-6f212ce4cb89" (UID: "44e12371-b3b4-4575-8595-6f212ce4cb89"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.376351 4650 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.376382 4650 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-fernet-keys\") on node \"crc\" DevicePath \"\"" Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.376392 4650 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44e12371-b3b4-4575-8595-6f212ce4cb89-config-data\") on node \"crc\" DevicePath \"\"" Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.376403 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pc85r\" (UniqueName: \"kubernetes.io/projected/44e12371-b3b4-4575-8595-6f212ce4cb89-kube-api-access-pc85r\") on node \"crc\" DevicePath \"\"" Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.804971 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29498881-4thgd" event={"ID":"44e12371-b3b4-4575-8595-6f212ce4cb89","Type":"ContainerDied","Data":"611635d7557d877a1a36e24d16046d5a638bac0811dec96211ee7faf37a83ed8"} Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.805006 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="611635d7557d877a1a36e24d16046d5a638bac0811dec96211ee7faf37a83ed8" Feb 01 08:01:06 crc kubenswrapper[4650]: I0201 08:01:06.805073 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29498881-4thgd" Feb 01 08:01:11 crc kubenswrapper[4650]: I0201 08:01:11.797624 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:01:11 crc kubenswrapper[4650]: I0201 08:01:11.867292 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:01:12 crc kubenswrapper[4650]: I0201 08:01:12.045072 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dwprt"] Feb 01 08:01:12 crc kubenswrapper[4650]: I0201 08:01:12.863109 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dwprt" podUID="ced033e2-978b-4840-8f84-77c75e881b8b" containerName="registry-server" containerID="cri-o://468c5be727f71475411637592460c3c2359c4277658d60e27d0d578213e0f138" gracePeriod=2 Feb 01 08:01:12 crc kubenswrapper[4650]: I0201 08:01:12.966449 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:01:12 crc kubenswrapper[4650]: E0201 08:01:12.966981 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.404139 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.530481 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ced033e2-978b-4840-8f84-77c75e881b8b-utilities\") pod \"ced033e2-978b-4840-8f84-77c75e881b8b\" (UID: \"ced033e2-978b-4840-8f84-77c75e881b8b\") " Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.530717 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfnv7\" (UniqueName: \"kubernetes.io/projected/ced033e2-978b-4840-8f84-77c75e881b8b-kube-api-access-jfnv7\") pod \"ced033e2-978b-4840-8f84-77c75e881b8b\" (UID: \"ced033e2-978b-4840-8f84-77c75e881b8b\") " Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.530751 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ced033e2-978b-4840-8f84-77c75e881b8b-catalog-content\") pod \"ced033e2-978b-4840-8f84-77c75e881b8b\" (UID: \"ced033e2-978b-4840-8f84-77c75e881b8b\") " Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.531940 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ced033e2-978b-4840-8f84-77c75e881b8b-utilities" (OuterVolumeSpecName: "utilities") pod "ced033e2-978b-4840-8f84-77c75e881b8b" (UID: "ced033e2-978b-4840-8f84-77c75e881b8b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.539223 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ced033e2-978b-4840-8f84-77c75e881b8b-kube-api-access-jfnv7" (OuterVolumeSpecName: "kube-api-access-jfnv7") pod "ced033e2-978b-4840-8f84-77c75e881b8b" (UID: "ced033e2-978b-4840-8f84-77c75e881b8b"). InnerVolumeSpecName "kube-api-access-jfnv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.633556 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ced033e2-978b-4840-8f84-77c75e881b8b-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.633596 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfnv7\" (UniqueName: \"kubernetes.io/projected/ced033e2-978b-4840-8f84-77c75e881b8b-kube-api-access-jfnv7\") on node \"crc\" DevicePath \"\"" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.655975 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ced033e2-978b-4840-8f84-77c75e881b8b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ced033e2-978b-4840-8f84-77c75e881b8b" (UID: "ced033e2-978b-4840-8f84-77c75e881b8b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.735565 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ced033e2-978b-4840-8f84-77c75e881b8b-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.880406 4650 generic.go:334] "Generic (PLEG): container finished" podID="ced033e2-978b-4840-8f84-77c75e881b8b" containerID="468c5be727f71475411637592460c3c2359c4277658d60e27d0d578213e0f138" exitCode=0 Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.880470 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dwprt" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.881767 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwprt" event={"ID":"ced033e2-978b-4840-8f84-77c75e881b8b","Type":"ContainerDied","Data":"468c5be727f71475411637592460c3c2359c4277658d60e27d0d578213e0f138"} Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.881987 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwprt" event={"ID":"ced033e2-978b-4840-8f84-77c75e881b8b","Type":"ContainerDied","Data":"05b4384ddde8693c0dd71c1b45d3b2c330ae4d7d99e237cccbf15e4b47a7d3ff"} Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.882057 4650 scope.go:117] "RemoveContainer" containerID="468c5be727f71475411637592460c3c2359c4277658d60e27d0d578213e0f138" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.911444 4650 scope.go:117] "RemoveContainer" containerID="7667878ccb43d961a1a0f783415e15833bb14b7583e282c24f53d0ff779d3320" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.924845 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dwprt"] Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.936074 4650 scope.go:117] "RemoveContainer" containerID="75ca309a14e54883e468c04fcac00eb5b3f4dc5a893c572c2766a5851da0469c" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.939668 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dwprt"] Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.979988 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ced033e2-978b-4840-8f84-77c75e881b8b" path="/var/lib/kubelet/pods/ced033e2-978b-4840-8f84-77c75e881b8b/volumes" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.983669 4650 scope.go:117] "RemoveContainer" containerID="468c5be727f71475411637592460c3c2359c4277658d60e27d0d578213e0f138" Feb 01 08:01:13 crc kubenswrapper[4650]: E0201 08:01:13.984067 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"468c5be727f71475411637592460c3c2359c4277658d60e27d0d578213e0f138\": container with ID starting with 468c5be727f71475411637592460c3c2359c4277658d60e27d0d578213e0f138 not found: ID does not exist" containerID="468c5be727f71475411637592460c3c2359c4277658d60e27d0d578213e0f138" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.984108 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"468c5be727f71475411637592460c3c2359c4277658d60e27d0d578213e0f138"} err="failed to get container status \"468c5be727f71475411637592460c3c2359c4277658d60e27d0d578213e0f138\": rpc error: code = NotFound desc = could not find container \"468c5be727f71475411637592460c3c2359c4277658d60e27d0d578213e0f138\": container with ID starting with 468c5be727f71475411637592460c3c2359c4277658d60e27d0d578213e0f138 not found: ID does not exist" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.984136 4650 scope.go:117] "RemoveContainer" containerID="7667878ccb43d961a1a0f783415e15833bb14b7583e282c24f53d0ff779d3320" Feb 01 08:01:13 crc kubenswrapper[4650]: E0201 08:01:13.984459 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7667878ccb43d961a1a0f783415e15833bb14b7583e282c24f53d0ff779d3320\": container with ID starting with 
7667878ccb43d961a1a0f783415e15833bb14b7583e282c24f53d0ff779d3320 not found: ID does not exist" containerID="7667878ccb43d961a1a0f783415e15833bb14b7583e282c24f53d0ff779d3320" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.984484 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7667878ccb43d961a1a0f783415e15833bb14b7583e282c24f53d0ff779d3320"} err="failed to get container status \"7667878ccb43d961a1a0f783415e15833bb14b7583e282c24f53d0ff779d3320\": rpc error: code = NotFound desc = could not find container \"7667878ccb43d961a1a0f783415e15833bb14b7583e282c24f53d0ff779d3320\": container with ID starting with 7667878ccb43d961a1a0f783415e15833bb14b7583e282c24f53d0ff779d3320 not found: ID does not exist" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.984502 4650 scope.go:117] "RemoveContainer" containerID="75ca309a14e54883e468c04fcac00eb5b3f4dc5a893c572c2766a5851da0469c" Feb 01 08:01:13 crc kubenswrapper[4650]: E0201 08:01:13.984753 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75ca309a14e54883e468c04fcac00eb5b3f4dc5a893c572c2766a5851da0469c\": container with ID starting with 75ca309a14e54883e468c04fcac00eb5b3f4dc5a893c572c2766a5851da0469c not found: ID does not exist" containerID="75ca309a14e54883e468c04fcac00eb5b3f4dc5a893c572c2766a5851da0469c" Feb 01 08:01:13 crc kubenswrapper[4650]: I0201 08:01:13.984778 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75ca309a14e54883e468c04fcac00eb5b3f4dc5a893c572c2766a5851da0469c"} err="failed to get container status \"75ca309a14e54883e468c04fcac00eb5b3f4dc5a893c572c2766a5851da0469c\": rpc error: code = NotFound desc = could not find container \"75ca309a14e54883e468c04fcac00eb5b3f4dc5a893c572c2766a5851da0469c\": container with ID starting with 75ca309a14e54883e468c04fcac00eb5b3f4dc5a893c572c2766a5851da0469c not found: ID does not exist" Feb 01 08:01:14 crc kubenswrapper[4650]: I0201 08:01:14.966278 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 08:01:14 crc kubenswrapper[4650]: I0201 08:01:14.966707 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 08:01:14 crc kubenswrapper[4650]: I0201 08:01:14.966742 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 08:01:14 crc kubenswrapper[4650]: I0201 08:01:14.966808 4650 scope.go:117] "RemoveContainer" containerID="b97280e0ed4d76c45bcae2e71d0aaff3b46058139a4ac3291e91cfe7220e1000" Feb 01 08:01:14 crc kubenswrapper[4650]: I0201 08:01:14.966818 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 08:01:14 crc kubenswrapper[4650]: E0201 08:01:14.967304 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s 
restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:01:19 crc kubenswrapper[4650]: I0201 08:01:19.965370 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:01:19 crc kubenswrapper[4650]: I0201 08:01:19.967137 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:01:19 crc kubenswrapper[4650]: E0201 08:01:19.967697 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:01:23 crc kubenswrapper[4650]: I0201 08:01:23.965349 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:01:23 crc kubenswrapper[4650]: E0201 08:01:23.966668 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:01:29 crc kubenswrapper[4650]: I0201 08:01:29.965921 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 08:01:29 crc kubenswrapper[4650]: I0201 08:01:29.966464 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 08:01:29 crc kubenswrapper[4650]: I0201 08:01:29.966487 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 08:01:29 crc kubenswrapper[4650]: I0201 08:01:29.966542 4650 scope.go:117] "RemoveContainer" containerID="b97280e0ed4d76c45bcae2e71d0aaff3b46058139a4ac3291e91cfe7220e1000" Feb 01 08:01:29 crc kubenswrapper[4650]: I0201 08:01:29.966549 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 08:01:29 crc kubenswrapper[4650]: E0201 08:01:29.966915 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for 
\"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:01:34 crc kubenswrapper[4650]: I0201 08:01:34.965899 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:01:34 crc kubenswrapper[4650]: I0201 08:01:34.966445 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:01:34 crc kubenswrapper[4650]: E0201 08:01:34.966694 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:01:38 crc kubenswrapper[4650]: I0201 08:01:38.965528 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:01:38 crc kubenswrapper[4650]: E0201 08:01:38.966834 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:01:42 crc kubenswrapper[4650]: I0201 08:01:42.967183 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 08:01:42 crc kubenswrapper[4650]: I0201 08:01:42.968001 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 08:01:42 crc kubenswrapper[4650]: I0201 08:01:42.968159 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 08:01:42 crc kubenswrapper[4650]: I0201 08:01:42.968259 4650 scope.go:117] "RemoveContainer" containerID="b97280e0ed4d76c45bcae2e71d0aaff3b46058139a4ac3291e91cfe7220e1000" Feb 01 08:01:42 crc kubenswrapper[4650]: I0201 08:01:42.968272 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 08:01:42 crc kubenswrapper[4650]: E0201 08:01:42.969431 4650 pod_workers.go:1301] "Error 
syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:01:47 crc kubenswrapper[4650]: I0201 08:01:47.966544 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:01:47 crc kubenswrapper[4650]: I0201 08:01:47.967474 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:01:47 crc kubenswrapper[4650]: E0201 08:01:47.968188 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:01:52 crc kubenswrapper[4650]: I0201 08:01:52.965397 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:01:52 crc kubenswrapper[4650]: E0201 08:01:52.966366 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:01:53 crc kubenswrapper[4650]: I0201 08:01:53.966742 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 08:01:53 crc kubenswrapper[4650]: I0201 08:01:53.966832 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 08:01:53 crc kubenswrapper[4650]: I0201 08:01:53.966862 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 08:01:53 crc kubenswrapper[4650]: I0201 08:01:53.966930 4650 scope.go:117] "RemoveContainer" containerID="b97280e0ed4d76c45bcae2e71d0aaff3b46058139a4ac3291e91cfe7220e1000" Feb 
01 08:01:53 crc kubenswrapper[4650]: I0201 08:01:53.966942 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 08:01:54 crc kubenswrapper[4650]: E0201 08:01:54.182669 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:01:54 crc kubenswrapper[4650]: I0201 08:01:54.367345 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec"} Feb 01 08:01:54 crc kubenswrapper[4650]: I0201 08:01:54.368551 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 08:01:54 crc kubenswrapper[4650]: I0201 08:01:54.368776 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 08:01:54 crc kubenswrapper[4650]: I0201 08:01:54.368810 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 08:01:54 crc kubenswrapper[4650]: I0201 08:01:54.368877 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 08:01:54 crc kubenswrapper[4650]: E0201 08:01:54.370223 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:01:59 crc kubenswrapper[4650]: I0201 08:01:59.965182 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:01:59 crc kubenswrapper[4650]: I0201 08:01:59.965643 4650 
scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:01:59 crc kubenswrapper[4650]: E0201 08:01:59.966100 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:02:03 crc kubenswrapper[4650]: I0201 08:02:03.964877 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:02:03 crc kubenswrapper[4650]: E0201 08:02:03.966354 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:02:04 crc kubenswrapper[4650]: I0201 08:02:04.966288 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 08:02:04 crc kubenswrapper[4650]: I0201 08:02:04.967519 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 08:02:04 crc kubenswrapper[4650]: I0201 08:02:04.967604 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 08:02:04 crc kubenswrapper[4650]: I0201 08:02:04.967769 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 08:02:05 crc kubenswrapper[4650]: I0201 08:02:05.478478 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb"} Feb 01 08:02:05 crc kubenswrapper[4650]: I0201 08:02:05.478811 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143"} Feb 01 08:02:06 crc kubenswrapper[4650]: I0201 08:02:06.495895 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" exitCode=1 Feb 01 08:02:06 crc kubenswrapper[4650]: I0201 08:02:06.495942 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" exitCode=1 Feb 01 08:02:06 crc kubenswrapper[4650]: I0201 08:02:06.495956 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" exitCode=1 Feb 01 08:02:06 crc kubenswrapper[4650]: 
I0201 08:02:06.495982 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb"} Feb 01 08:02:06 crc kubenswrapper[4650]: I0201 08:02:06.496015 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143"} Feb 01 08:02:06 crc kubenswrapper[4650]: I0201 08:02:06.496049 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df"} Feb 01 08:02:06 crc kubenswrapper[4650]: I0201 08:02:06.496062 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0"} Feb 01 08:02:06 crc kubenswrapper[4650]: I0201 08:02:06.496084 4650 scope.go:117] "RemoveContainer" containerID="ccc1f6a2dbc0913fc6f3ea14ba6bfbb6b110b06229e7f1619ec33217c0418e4d" Feb 01 08:02:06 crc kubenswrapper[4650]: I0201 08:02:06.497180 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:02:06 crc kubenswrapper[4650]: I0201 08:02:06.497286 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:02:06 crc kubenswrapper[4650]: I0201 08:02:06.497537 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:02:06 crc kubenswrapper[4650]: E0201 08:02:06.498119 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:02:06 crc kubenswrapper[4650]: I0201 08:02:06.582528 4650 scope.go:117] "RemoveContainer" containerID="0c1d966cc5d0bd00d7fc3af3c07ed78c56e318c9b2d32ee7e4a67b7a37169dc9" Feb 01 08:02:06 crc kubenswrapper[4650]: I0201 08:02:06.631014 4650 scope.go:117] "RemoveContainer" containerID="0fe52d571d11f36f7ae2be41d16000990c6d48fbd8befe010e3a4214573dd938" Feb 01 08:02:07 crc kubenswrapper[4650]: I0201 08:02:07.514748 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:02:07 crc kubenswrapper[4650]: I0201 08:02:07.514905 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:02:07 crc kubenswrapper[4650]: I0201 08:02:07.515137 4650 scope.go:117] "RemoveContainer" 
containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:02:07 crc kubenswrapper[4650]: E0201 08:02:07.517907 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:02:13 crc kubenswrapper[4650]: I0201 08:02:13.966313 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:02:13 crc kubenswrapper[4650]: I0201 08:02:13.967300 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:02:13 crc kubenswrapper[4650]: E0201 08:02:13.968114 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:02:16 crc kubenswrapper[4650]: I0201 08:02:16.965704 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:02:16 crc kubenswrapper[4650]: E0201 08:02:16.966329 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:02:20 crc kubenswrapper[4650]: I0201 08:02:20.965298 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:02:20 crc kubenswrapper[4650]: I0201 08:02:20.965839 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:02:20 crc kubenswrapper[4650]: I0201 08:02:20.965933 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:02:20 crc kubenswrapper[4650]: E0201 08:02:20.966274 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:02:24 crc kubenswrapper[4650]: I0201 08:02:24.965499 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:02:24 crc kubenswrapper[4650]: I0201 08:02:24.966148 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:02:24 crc kubenswrapper[4650]: E0201 08:02:24.966412 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:02:30 crc kubenswrapper[4650]: I0201 08:02:30.966301 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:02:30 crc kubenswrapper[4650]: E0201 08:02:30.967696 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:02:32 crc kubenswrapper[4650]: I0201 08:02:32.965873 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:02:32 crc kubenswrapper[4650]: I0201 08:02:32.966462 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:02:32 crc kubenswrapper[4650]: I0201 08:02:32.966545 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:02:32 crc kubenswrapper[4650]: E0201 08:02:32.966811 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:02:36 crc kubenswrapper[4650]: I0201 08:02:36.965787 4650 scope.go:117] 
"RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:02:36 crc kubenswrapper[4650]: I0201 08:02:36.966083 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:02:36 crc kubenswrapper[4650]: E0201 08:02:36.966380 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:02:44 crc kubenswrapper[4650]: I0201 08:02:44.966731 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:02:44 crc kubenswrapper[4650]: I0201 08:02:44.967618 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:02:44 crc kubenswrapper[4650]: I0201 08:02:44.967864 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:02:44 crc kubenswrapper[4650]: E0201 08:02:44.968487 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:02:45 crc kubenswrapper[4650]: I0201 08:02:45.965787 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:02:45 crc kubenswrapper[4650]: E0201 08:02:45.966581 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:02:48 crc kubenswrapper[4650]: I0201 08:02:48.965568 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:02:48 crc kubenswrapper[4650]: I0201 08:02:48.966142 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:02:48 crc kubenswrapper[4650]: E0201 08:02:48.966446 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:02:53 crc kubenswrapper[4650]: I0201 08:02:53.185874 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:02:53 crc kubenswrapper[4650]: E0201 08:02:53.186106 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 08:02:53 crc kubenswrapper[4650]: E0201 08:02:53.186725 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 08:04:55.186697142 +0000 UTC m=+2493.909795407 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 08:02:56 crc kubenswrapper[4650]: I0201 08:02:56.966966 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:02:56 crc kubenswrapper[4650]: I0201 08:02:56.967465 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:02:56 crc kubenswrapper[4650]: I0201 08:02:56.967661 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:02:56 crc kubenswrapper[4650]: E0201 08:02:56.968283 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:02:57 crc kubenswrapper[4650]: I0201 08:02:57.967556 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:02:57 crc kubenswrapper[4650]: E0201 08:02:57.968000 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:03:01 crc kubenswrapper[4650]: I0201 08:03:01.973325 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:03:01 crc kubenswrapper[4650]: I0201 08:03:01.974337 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:03:01 crc kubenswrapper[4650]: E0201 08:03:01.975053 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:03:02 crc kubenswrapper[4650]: E0201 08:03:02.732320 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 08:03:02 crc kubenswrapper[4650]: I0201 08:03:02.754012 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:03:11 crc kubenswrapper[4650]: I0201 08:03:11.970971 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:03:11 crc kubenswrapper[4650]: E0201 08:03:11.971823 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:03:11 crc kubenswrapper[4650]: I0201 08:03:11.972071 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:03:11 crc kubenswrapper[4650]: I0201 08:03:11.972203 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:03:11 crc kubenswrapper[4650]: I0201 08:03:11.972384 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:03:11 crc kubenswrapper[4650]: E0201 08:03:11.972895 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:03:15 crc kubenswrapper[4650]: I0201 08:03:15.966168 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:03:15 crc kubenswrapper[4650]: I0201 08:03:15.966618 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:03:15 crc kubenswrapper[4650]: E0201 08:03:15.967305 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:03:23 crc kubenswrapper[4650]: I0201 08:03:23.966717 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:03:23 crc kubenswrapper[4650]: I0201 08:03:23.967540 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:03:23 crc kubenswrapper[4650]: I0201 08:03:23.967702 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:03:23 crc kubenswrapper[4650]: E0201 08:03:23.968230 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:03:24 crc kubenswrapper[4650]: I0201 08:03:24.966296 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:03:24 crc kubenswrapper[4650]: E0201 08:03:24.967142 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:03:26 crc kubenswrapper[4650]: I0201 08:03:26.965449 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:03:26 crc kubenswrapper[4650]: I0201 08:03:26.965482 4650 scope.go:117] 
"RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:03:26 crc kubenswrapper[4650]: E0201 08:03:26.965744 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:03:29 crc kubenswrapper[4650]: I0201 08:03:29.750661 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="ce8e76c5-52b4-46aa-b009-181f08e5cdc7" containerName="galera" probeResult="failure" output="command timed out" Feb 01 08:03:34 crc kubenswrapper[4650]: I0201 08:03:34.101516 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" exitCode=1 Feb 01 08:03:34 crc kubenswrapper[4650]: I0201 08:03:34.101595 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0"} Feb 01 08:03:34 crc kubenswrapper[4650]: I0201 08:03:34.102189 4650 scope.go:117] "RemoveContainer" containerID="84bed39c1224b205d90ce0fc4229620a28e0256114e7efa0374535c3fdb0f0bd" Feb 01 08:03:34 crc kubenswrapper[4650]: I0201 08:03:34.103351 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:03:34 crc kubenswrapper[4650]: I0201 08:03:34.103673 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:03:34 crc kubenswrapper[4650]: I0201 08:03:34.103765 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:03:34 crc kubenswrapper[4650]: I0201 08:03:34.103960 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:03:34 crc kubenswrapper[4650]: E0201 08:03:34.105016 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:03:35 crc kubenswrapper[4650]: 
I0201 08:03:35.966058 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:03:35 crc kubenswrapper[4650]: E0201 08:03:35.966543 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:03:41 crc kubenswrapper[4650]: I0201 08:03:41.970656 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:03:41 crc kubenswrapper[4650]: I0201 08:03:41.971110 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:03:41 crc kubenswrapper[4650]: E0201 08:03:41.971361 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:03:46 crc kubenswrapper[4650]: I0201 08:03:46.965090 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:03:46 crc kubenswrapper[4650]: I0201 08:03:46.965706 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:03:46 crc kubenswrapper[4650]: I0201 08:03:46.965784 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:03:46 crc kubenswrapper[4650]: I0201 08:03:46.965813 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:03:46 crc kubenswrapper[4650]: E0201 08:03:46.965837 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:03:46 crc kubenswrapper[4650]: I0201 08:03:46.965890 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:03:46 crc kubenswrapper[4650]: E0201 08:03:46.966288 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:03:55 crc kubenswrapper[4650]: I0201 08:03:55.965311 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:03:55 crc kubenswrapper[4650]: I0201 08:03:55.965908 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:03:56 crc kubenswrapper[4650]: E0201 08:03:56.145684 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:03:56 crc kubenswrapper[4650]: I0201 08:03:56.330427 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd"} Feb 01 08:03:56 crc kubenswrapper[4650]: I0201 08:03:56.330713 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:03:56 crc kubenswrapper[4650]: I0201 08:03:56.331240 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:03:56 crc kubenswrapper[4650]: E0201 08:03:56.331558 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:03:57 crc kubenswrapper[4650]: I0201 08:03:57.345821 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" exitCode=1 Feb 01 08:03:57 crc kubenswrapper[4650]: I0201 08:03:57.345881 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd"} Feb 01 08:03:57 crc kubenswrapper[4650]: I0201 08:03:57.345940 4650 scope.go:117] "RemoveContainer" containerID="caf374af1d6027761a0327c469871286c7186073085cc1ff6e7695b109db4d2b" Feb 01 08:03:57 crc kubenswrapper[4650]: I0201 08:03:57.346343 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:03:57 crc kubenswrapper[4650]: I0201 08:03:57.346368 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" 
Feb 01 08:03:57 crc kubenswrapper[4650]: E0201 08:03:57.346653 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:03:57 crc kubenswrapper[4650]: I0201 08:03:57.799676 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:03:57 crc kubenswrapper[4650]: I0201 08:03:57.966551 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:03:57 crc kubenswrapper[4650]: I0201 08:03:57.966644 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:03:57 crc kubenswrapper[4650]: I0201 08:03:57.966677 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:03:57 crc kubenswrapper[4650]: I0201 08:03:57.966784 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:03:57 crc kubenswrapper[4650]: E0201 08:03:57.967213 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:03:58 crc kubenswrapper[4650]: I0201 08:03:58.357159 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:03:58 crc kubenswrapper[4650]: I0201 08:03:58.358405 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:03:58 crc kubenswrapper[4650]: E0201 08:03:58.358735 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" 
podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:03:59 crc kubenswrapper[4650]: I0201 08:03:59.363618 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:03:59 crc kubenswrapper[4650]: I0201 08:03:59.363648 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:03:59 crc kubenswrapper[4650]: E0201 08:03:59.364171 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:03:59 crc kubenswrapper[4650]: I0201 08:03:59.965165 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:03:59 crc kubenswrapper[4650]: E0201 08:03:59.965854 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:04:08 crc kubenswrapper[4650]: I0201 08:04:08.967940 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:04:08 crc kubenswrapper[4650]: I0201 08:04:08.969330 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:04:08 crc kubenswrapper[4650]: I0201 08:04:08.969407 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:04:08 crc kubenswrapper[4650]: I0201 08:04:08.969572 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:04:08 crc kubenswrapper[4650]: E0201 08:04:08.970901 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:04:10 crc kubenswrapper[4650]: I0201 08:04:10.965612 4650 
scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:04:10 crc kubenswrapper[4650]: E0201 08:04:10.965862 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:04:12 crc kubenswrapper[4650]: I0201 08:04:12.965361 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:04:12 crc kubenswrapper[4650]: I0201 08:04:12.965686 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:04:12 crc kubenswrapper[4650]: E0201 08:04:12.965919 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:04:20 crc kubenswrapper[4650]: I0201 08:04:20.965113 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:04:20 crc kubenswrapper[4650]: I0201 08:04:20.965679 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:04:20 crc kubenswrapper[4650]: I0201 08:04:20.965709 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:04:20 crc kubenswrapper[4650]: I0201 08:04:20.965786 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:04:20 crc kubenswrapper[4650]: E0201 08:04:20.966144 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:04:21 crc kubenswrapper[4650]: I0201 08:04:21.975324 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 
01 08:04:21 crc kubenswrapper[4650]: E0201 08:04:21.975603 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:04:23 crc kubenswrapper[4650]: I0201 08:04:23.966631 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:04:23 crc kubenswrapper[4650]: I0201 08:04:23.967217 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:04:23 crc kubenswrapper[4650]: E0201 08:04:23.967657 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:04:33 crc kubenswrapper[4650]: I0201 08:04:33.966436 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:04:33 crc kubenswrapper[4650]: I0201 08:04:33.967276 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:04:33 crc kubenswrapper[4650]: I0201 08:04:33.967468 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:04:33 crc kubenswrapper[4650]: I0201 08:04:33.967531 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:04:33 crc kubenswrapper[4650]: I0201 08:04:33.967719 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:04:33 crc kubenswrapper[4650]: E0201 08:04:33.967957 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:04:33 crc kubenswrapper[4650]: E0201 08:04:33.968559 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:04:36 crc kubenswrapper[4650]: I0201 08:04:36.965843 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:04:36 crc kubenswrapper[4650]: I0201 08:04:36.966546 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:04:36 crc kubenswrapper[4650]: E0201 08:04:36.966982 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:04:39 crc kubenswrapper[4650]: I0201 08:04:39.783363 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" exitCode=1 Feb 01 08:04:39 crc kubenswrapper[4650]: I0201 08:04:39.783731 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec"} Feb 01 08:04:39 crc kubenswrapper[4650]: I0201 08:04:39.783781 4650 scope.go:117] "RemoveContainer" containerID="b97280e0ed4d76c45bcae2e71d0aaff3b46058139a4ac3291e91cfe7220e1000" Feb 01 08:04:39 crc kubenswrapper[4650]: I0201 08:04:39.785619 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:04:39 crc kubenswrapper[4650]: I0201 08:04:39.787274 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:04:39 crc kubenswrapper[4650]: I0201 08:04:39.789151 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:04:39 crc kubenswrapper[4650]: I0201 08:04:39.789350 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:04:39 crc kubenswrapper[4650]: I0201 08:04:39.789414 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:04:39 crc kubenswrapper[4650]: E0201 08:04:39.792864 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:04:47 crc kubenswrapper[4650]: I0201 08:04:47.966520 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:04:47 crc kubenswrapper[4650]: E0201 08:04:47.969161 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:04:49 crc kubenswrapper[4650]: I0201 08:04:49.965715 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:04:49 crc kubenswrapper[4650]: I0201 08:04:49.966061 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:04:49 crc kubenswrapper[4650]: E0201 08:04:49.966269 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:04:50 crc kubenswrapper[4650]: I0201 08:04:50.966326 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:04:50 crc kubenswrapper[4650]: I0201 08:04:50.966518 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:04:50 crc kubenswrapper[4650]: I0201 08:04:50.966581 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:04:50 crc kubenswrapper[4650]: I0201 08:04:50.966716 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:04:50 crc kubenswrapper[4650]: I0201 08:04:50.967150 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:04:50 crc kubenswrapper[4650]: E0201 08:04:50.968130 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:04:55 crc kubenswrapper[4650]: I0201 08:04:55.270840 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:04:55 crc kubenswrapper[4650]: E0201 08:04:55.271145 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 08:04:55 crc kubenswrapper[4650]: E0201 08:04:55.271704 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 08:06:57.271680446 +0000 UTC m=+2615.994778691 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 08:04:59 crc kubenswrapper[4650]: I0201 08:04:59.966221 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:04:59 crc kubenswrapper[4650]: E0201 08:04:59.967223 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:05:00 crc kubenswrapper[4650]: I0201 08:05:00.965697 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:05:00 crc kubenswrapper[4650]: I0201 08:05:00.966180 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:05:00 crc kubenswrapper[4650]: E0201 08:05:00.966775 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:05:02 crc kubenswrapper[4650]: I0201 08:05:02.966382 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:05:02 crc kubenswrapper[4650]: I0201 08:05:02.968214 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:05:02 crc kubenswrapper[4650]: I0201 08:05:02.968351 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:05:02 crc kubenswrapper[4650]: I0201 08:05:02.968491 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:05:02 crc kubenswrapper[4650]: I0201 08:05:02.968576 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:05:02 crc kubenswrapper[4650]: E0201 08:05:02.969411 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:05:05 crc kubenswrapper[4650]: E0201 08:05:05.755754 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 08:05:06 crc kubenswrapper[4650]: I0201 08:05:06.023622 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:05:13 crc kubenswrapper[4650]: I0201 08:05:13.967060 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:05:13 crc kubenswrapper[4650]: I0201 08:05:13.967698 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:05:13 crc kubenswrapper[4650]: I0201 08:05:13.967805 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:05:13 crc kubenswrapper[4650]: I0201 08:05:13.967908 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:05:13 crc kubenswrapper[4650]: I0201 08:05:13.967923 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:05:13 crc kubenswrapper[4650]: E0201 08:05:13.968639 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:05:14 crc kubenswrapper[4650]: I0201 08:05:14.965645 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:05:14 crc kubenswrapper[4650]: I0201 08:05:14.965856 4650 scope.go:117] "RemoveContainer" 
containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:05:14 crc kubenswrapper[4650]: I0201 08:05:14.966385 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:05:14 crc kubenswrapper[4650]: E0201 08:05:14.966950 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:05:16 crc kubenswrapper[4650]: I0201 08:05:16.139755 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"602c56a59dd65049f4631ec9296207f999808cfcf7cd5f0739471580c8fa001e"} Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.363299 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wl7w4"] Feb 01 08:05:23 crc kubenswrapper[4650]: E0201 08:05:23.366787 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ced033e2-978b-4840-8f84-77c75e881b8b" containerName="extract-content" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.366808 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ced033e2-978b-4840-8f84-77c75e881b8b" containerName="extract-content" Feb 01 08:05:23 crc kubenswrapper[4650]: E0201 08:05:23.366827 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44e12371-b3b4-4575-8595-6f212ce4cb89" containerName="keystone-cron" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.366835 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="44e12371-b3b4-4575-8595-6f212ce4cb89" containerName="keystone-cron" Feb 01 08:05:23 crc kubenswrapper[4650]: E0201 08:05:23.366864 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ced033e2-978b-4840-8f84-77c75e881b8b" containerName="extract-utilities" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.366872 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ced033e2-978b-4840-8f84-77c75e881b8b" containerName="extract-utilities" Feb 01 08:05:23 crc kubenswrapper[4650]: E0201 08:05:23.366890 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ced033e2-978b-4840-8f84-77c75e881b8b" containerName="registry-server" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.366898 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="ced033e2-978b-4840-8f84-77c75e881b8b" containerName="registry-server" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.367111 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="ced033e2-978b-4840-8f84-77c75e881b8b" containerName="registry-server" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.367143 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="44e12371-b3b4-4575-8595-6f212ce4cb89" containerName="keystone-cron" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.368748 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.427171 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-utilities\") pod \"community-operators-wl7w4\" (UID: \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\") " pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.427230 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-catalog-content\") pod \"community-operators-wl7w4\" (UID: \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\") " pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.427281 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28ns6\" (UniqueName: \"kubernetes.io/projected/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-kube-api-access-28ns6\") pod \"community-operators-wl7w4\" (UID: \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\") " pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.428749 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wl7w4"] Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.530046 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-utilities\") pod \"community-operators-wl7w4\" (UID: \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\") " pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.530964 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-catalog-content\") pod \"community-operators-wl7w4\" (UID: \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\") " pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.530827 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-utilities\") pod \"community-operators-wl7w4\" (UID: \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\") " pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.531360 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-catalog-content\") pod \"community-operators-wl7w4\" (UID: \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\") " pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.531992 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28ns6\" (UniqueName: \"kubernetes.io/projected/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-kube-api-access-28ns6\") pod \"community-operators-wl7w4\" (UID: \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\") " pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.598649 4650 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-28ns6\" (UniqueName: \"kubernetes.io/projected/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-kube-api-access-28ns6\") pod \"community-operators-wl7w4\" (UID: \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\") " pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:23 crc kubenswrapper[4650]: I0201 08:05:23.745417 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:24 crc kubenswrapper[4650]: I0201 08:05:24.189137 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wl7w4"] Feb 01 08:05:24 crc kubenswrapper[4650]: I0201 08:05:24.215360 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wl7w4" event={"ID":"63d8a90d-b159-4c45-b6de-ea8ac2e267cf","Type":"ContainerStarted","Data":"18199f2b44eb5f25ae8407a3ef5ab1a18ccba8fd74cc317bc27c8d7bb1b03910"} Feb 01 08:05:25 crc kubenswrapper[4650]: I0201 08:05:25.227643 4650 generic.go:334] "Generic (PLEG): container finished" podID="63d8a90d-b159-4c45-b6de-ea8ac2e267cf" containerID="3bbb83084a8a3df8ff070ac1375c0a0147fa8b9272e1837a3c6104210d60e650" exitCode=0 Feb 01 08:05:25 crc kubenswrapper[4650]: I0201 08:05:25.228002 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wl7w4" event={"ID":"63d8a90d-b159-4c45-b6de-ea8ac2e267cf","Type":"ContainerDied","Data":"3bbb83084a8a3df8ff070ac1375c0a0147fa8b9272e1837a3c6104210d60e650"} Feb 01 08:05:26 crc kubenswrapper[4650]: I0201 08:05:26.238414 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wl7w4" event={"ID":"63d8a90d-b159-4c45-b6de-ea8ac2e267cf","Type":"ContainerStarted","Data":"38d9762eb98cf168d0080c3b3573aee4c977f3bfa1d04422799e5700b8ec8376"} Feb 01 08:05:28 crc kubenswrapper[4650]: I0201 08:05:28.264796 4650 generic.go:334] "Generic (PLEG): container finished" podID="63d8a90d-b159-4c45-b6de-ea8ac2e267cf" containerID="38d9762eb98cf168d0080c3b3573aee4c977f3bfa1d04422799e5700b8ec8376" exitCode=0 Feb 01 08:05:28 crc kubenswrapper[4650]: I0201 08:05:28.265113 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wl7w4" event={"ID":"63d8a90d-b159-4c45-b6de-ea8ac2e267cf","Type":"ContainerDied","Data":"38d9762eb98cf168d0080c3b3573aee4c977f3bfa1d04422799e5700b8ec8376"} Feb 01 08:05:28 crc kubenswrapper[4650]: I0201 08:05:28.964876 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:05:28 crc kubenswrapper[4650]: I0201 08:05:28.964902 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:05:28 crc kubenswrapper[4650]: E0201 08:05:28.965123 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:05:28 crc kubenswrapper[4650]: I0201 08:05:28.965692 4650 
scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:05:28 crc kubenswrapper[4650]: I0201 08:05:28.965746 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:05:28 crc kubenswrapper[4650]: I0201 08:05:28.965766 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:05:28 crc kubenswrapper[4650]: I0201 08:05:28.965807 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:05:28 crc kubenswrapper[4650]: I0201 08:05:28.965812 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:05:28 crc kubenswrapper[4650]: E0201 08:05:28.966084 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:05:29 crc kubenswrapper[4650]: I0201 08:05:29.275665 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wl7w4" event={"ID":"63d8a90d-b159-4c45-b6de-ea8ac2e267cf","Type":"ContainerStarted","Data":"65cadf386ff8ae59736e45f378e952748a11136533af80f0d9e555eade55e52d"} Feb 01 08:05:29 crc kubenswrapper[4650]: I0201 08:05:29.294214 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wl7w4" podStartSLOduration=2.767098595 podStartE2EDuration="6.294191893s" podCreationTimestamp="2026-02-01 08:05:23 +0000 UTC" firstStartedPulling="2026-02-01 08:05:25.231473915 +0000 UTC m=+2523.954572210" lastFinishedPulling="2026-02-01 08:05:28.758567263 +0000 UTC m=+2527.481665508" observedRunningTime="2026-02-01 08:05:29.292428887 +0000 UTC m=+2528.015527132" watchObservedRunningTime="2026-02-01 08:05:29.294191893 +0000 UTC m=+2528.017290158" Feb 01 08:05:33 crc kubenswrapper[4650]: I0201 08:05:33.746011 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:33 crc kubenswrapper[4650]: I0201 08:05:33.746458 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:33 crc kubenswrapper[4650]: I0201 08:05:33.816406 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:34 crc kubenswrapper[4650]: I0201 08:05:34.388285 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:34 crc kubenswrapper[4650]: I0201 08:05:34.452754 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wl7w4"] Feb 01 08:05:36 crc kubenswrapper[4650]: I0201 08:05:36.347573 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wl7w4" podUID="63d8a90d-b159-4c45-b6de-ea8ac2e267cf" containerName="registry-server" containerID="cri-o://65cadf386ff8ae59736e45f378e952748a11136533af80f0d9e555eade55e52d" gracePeriod=2 Feb 01 08:05:36 crc kubenswrapper[4650]: I0201 08:05:36.755990 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:36 crc kubenswrapper[4650]: I0201 08:05:36.838808 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-utilities\") pod \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\" (UID: \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\") " Feb 01 08:05:36 crc kubenswrapper[4650]: I0201 08:05:36.838929 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28ns6\" (UniqueName: \"kubernetes.io/projected/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-kube-api-access-28ns6\") pod \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\" (UID: \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\") " Feb 01 08:05:36 crc kubenswrapper[4650]: I0201 08:05:36.839056 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-catalog-content\") pod \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\" (UID: \"63d8a90d-b159-4c45-b6de-ea8ac2e267cf\") " Feb 01 08:05:36 crc kubenswrapper[4650]: I0201 08:05:36.839547 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-utilities" (OuterVolumeSpecName: "utilities") pod "63d8a90d-b159-4c45-b6de-ea8ac2e267cf" (UID: "63d8a90d-b159-4c45-b6de-ea8ac2e267cf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:05:36 crc kubenswrapper[4650]: I0201 08:05:36.848110 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-kube-api-access-28ns6" (OuterVolumeSpecName: "kube-api-access-28ns6") pod "63d8a90d-b159-4c45-b6de-ea8ac2e267cf" (UID: "63d8a90d-b159-4c45-b6de-ea8ac2e267cf"). InnerVolumeSpecName "kube-api-access-28ns6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:05:36 crc kubenswrapper[4650]: I0201 08:05:36.895308 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "63d8a90d-b159-4c45-b6de-ea8ac2e267cf" (UID: "63d8a90d-b159-4c45-b6de-ea8ac2e267cf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:05:36 crc kubenswrapper[4650]: I0201 08:05:36.941401 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 08:05:36 crc kubenswrapper[4650]: I0201 08:05:36.941430 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 08:05:36 crc kubenswrapper[4650]: I0201 08:05:36.941442 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28ns6\" (UniqueName: \"kubernetes.io/projected/63d8a90d-b159-4c45-b6de-ea8ac2e267cf-kube-api-access-28ns6\") on node \"crc\" DevicePath \"\"" Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.372100 4650 generic.go:334] "Generic (PLEG): container finished" podID="63d8a90d-b159-4c45-b6de-ea8ac2e267cf" containerID="65cadf386ff8ae59736e45f378e952748a11136533af80f0d9e555eade55e52d" exitCode=0 Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.372182 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wl7w4" Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.372181 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wl7w4" event={"ID":"63d8a90d-b159-4c45-b6de-ea8ac2e267cf","Type":"ContainerDied","Data":"65cadf386ff8ae59736e45f378e952748a11136533af80f0d9e555eade55e52d"} Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.372307 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wl7w4" event={"ID":"63d8a90d-b159-4c45-b6de-ea8ac2e267cf","Type":"ContainerDied","Data":"18199f2b44eb5f25ae8407a3ef5ab1a18ccba8fd74cc317bc27c8d7bb1b03910"} Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.372343 4650 scope.go:117] "RemoveContainer" containerID="65cadf386ff8ae59736e45f378e952748a11136533af80f0d9e555eade55e52d" Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.423813 4650 scope.go:117] "RemoveContainer" containerID="38d9762eb98cf168d0080c3b3573aee4c977f3bfa1d04422799e5700b8ec8376" Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.430017 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wl7w4"] Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.442094 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wl7w4"] Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.459250 4650 scope.go:117] "RemoveContainer" containerID="3bbb83084a8a3df8ff070ac1375c0a0147fa8b9272e1837a3c6104210d60e650" Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.501370 4650 scope.go:117] "RemoveContainer" containerID="65cadf386ff8ae59736e45f378e952748a11136533af80f0d9e555eade55e52d" Feb 01 08:05:37 crc kubenswrapper[4650]: E0201 08:05:37.501991 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65cadf386ff8ae59736e45f378e952748a11136533af80f0d9e555eade55e52d\": container with ID starting with 65cadf386ff8ae59736e45f378e952748a11136533af80f0d9e555eade55e52d not found: ID does not exist" containerID="65cadf386ff8ae59736e45f378e952748a11136533af80f0d9e555eade55e52d" Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.502079 
4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65cadf386ff8ae59736e45f378e952748a11136533af80f0d9e555eade55e52d"} err="failed to get container status \"65cadf386ff8ae59736e45f378e952748a11136533af80f0d9e555eade55e52d\": rpc error: code = NotFound desc = could not find container \"65cadf386ff8ae59736e45f378e952748a11136533af80f0d9e555eade55e52d\": container with ID starting with 65cadf386ff8ae59736e45f378e952748a11136533af80f0d9e555eade55e52d not found: ID does not exist" Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.502110 4650 scope.go:117] "RemoveContainer" containerID="38d9762eb98cf168d0080c3b3573aee4c977f3bfa1d04422799e5700b8ec8376" Feb 01 08:05:37 crc kubenswrapper[4650]: E0201 08:05:37.502763 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38d9762eb98cf168d0080c3b3573aee4c977f3bfa1d04422799e5700b8ec8376\": container with ID starting with 38d9762eb98cf168d0080c3b3573aee4c977f3bfa1d04422799e5700b8ec8376 not found: ID does not exist" containerID="38d9762eb98cf168d0080c3b3573aee4c977f3bfa1d04422799e5700b8ec8376" Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.502822 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38d9762eb98cf168d0080c3b3573aee4c977f3bfa1d04422799e5700b8ec8376"} err="failed to get container status \"38d9762eb98cf168d0080c3b3573aee4c977f3bfa1d04422799e5700b8ec8376\": rpc error: code = NotFound desc = could not find container \"38d9762eb98cf168d0080c3b3573aee4c977f3bfa1d04422799e5700b8ec8376\": container with ID starting with 38d9762eb98cf168d0080c3b3573aee4c977f3bfa1d04422799e5700b8ec8376 not found: ID does not exist" Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.502859 4650 scope.go:117] "RemoveContainer" containerID="3bbb83084a8a3df8ff070ac1375c0a0147fa8b9272e1837a3c6104210d60e650" Feb 01 08:05:37 crc kubenswrapper[4650]: E0201 08:05:37.503265 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3bbb83084a8a3df8ff070ac1375c0a0147fa8b9272e1837a3c6104210d60e650\": container with ID starting with 3bbb83084a8a3df8ff070ac1375c0a0147fa8b9272e1837a3c6104210d60e650 not found: ID does not exist" containerID="3bbb83084a8a3df8ff070ac1375c0a0147fa8b9272e1837a3c6104210d60e650" Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.503300 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3bbb83084a8a3df8ff070ac1375c0a0147fa8b9272e1837a3c6104210d60e650"} err="failed to get container status \"3bbb83084a8a3df8ff070ac1375c0a0147fa8b9272e1837a3c6104210d60e650\": rpc error: code = NotFound desc = could not find container \"3bbb83084a8a3df8ff070ac1375c0a0147fa8b9272e1837a3c6104210d60e650\": container with ID starting with 3bbb83084a8a3df8ff070ac1375c0a0147fa8b9272e1837a3c6104210d60e650 not found: ID does not exist" Feb 01 08:05:37 crc kubenswrapper[4650]: I0201 08:05:37.985558 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63d8a90d-b159-4c45-b6de-ea8ac2e267cf" path="/var/lib/kubelet/pods/63d8a90d-b159-4c45-b6de-ea8ac2e267cf/volumes" Feb 01 08:05:40 crc kubenswrapper[4650]: I0201 08:05:40.966163 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:05:40 crc kubenswrapper[4650]: I0201 08:05:40.966505 4650 scope.go:117] "RemoveContainer" 
containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:05:40 crc kubenswrapper[4650]: I0201 08:05:40.966534 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:05:40 crc kubenswrapper[4650]: I0201 08:05:40.966618 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:05:40 crc kubenswrapper[4650]: I0201 08:05:40.966627 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:05:40 crc kubenswrapper[4650]: E0201 08:05:40.966984 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:05:40 crc kubenswrapper[4650]: I0201 08:05:40.968652 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:05:40 crc kubenswrapper[4650]: I0201 08:05:40.968683 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:05:40 crc kubenswrapper[4650]: E0201 08:05:40.968914 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:05:53 crc kubenswrapper[4650]: I0201 08:05:53.966865 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:05:53 crc kubenswrapper[4650]: I0201 08:05:53.967558 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:05:53 crc kubenswrapper[4650]: I0201 08:05:53.967589 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:05:53 crc kubenswrapper[4650]: I0201 08:05:53.967635 4650 scope.go:117] "RemoveContainer" 
containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:05:53 crc kubenswrapper[4650]: I0201 08:05:53.967642 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:05:53 crc kubenswrapper[4650]: E0201 08:05:53.968048 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:05:55 crc kubenswrapper[4650]: I0201 08:05:55.966404 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:05:55 crc kubenswrapper[4650]: I0201 08:05:55.966878 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:05:55 crc kubenswrapper[4650]: E0201 08:05:55.967494 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:06:06 crc kubenswrapper[4650]: I0201 08:06:06.965213 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:06:06 crc kubenswrapper[4650]: I0201 08:06:06.965828 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:06:06 crc kubenswrapper[4650]: I0201 08:06:06.965851 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:06:06 crc kubenswrapper[4650]: I0201 08:06:06.965894 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:06:06 crc kubenswrapper[4650]: I0201 08:06:06.965934 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:06:06 crc kubenswrapper[4650]: I0201 08:06:06.966148 4650 scope.go:117] "RemoveContainer" 
containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:06:06 crc kubenswrapper[4650]: I0201 08:06:06.966164 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:06:06 crc kubenswrapper[4650]: E0201 08:06:06.966491 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:06:07 crc kubenswrapper[4650]: E0201 08:06:07.134972 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:06:07 crc kubenswrapper[4650]: I0201 08:06:07.657966 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"499bf9f85d7da9f44cd71c55b9d8e9b0be7146cff5c621a8ec41c15a012dac71"} Feb 01 08:06:07 crc kubenswrapper[4650]: I0201 08:06:07.658581 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:06:07 crc kubenswrapper[4650]: E0201 08:06:07.658757 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:06:07 crc kubenswrapper[4650]: I0201 08:06:07.658800 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:06:08 crc kubenswrapper[4650]: I0201 08:06:08.670758 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:06:08 crc kubenswrapper[4650]: E0201 08:06:08.671586 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server 
pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:06:12 crc kubenswrapper[4650]: I0201 08:06:12.811805 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:06:14 crc kubenswrapper[4650]: I0201 08:06:14.809729 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:06:15 crc kubenswrapper[4650]: I0201 08:06:15.808988 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:06:18 crc kubenswrapper[4650]: I0201 08:06:18.808380 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:06:18 crc kubenswrapper[4650]: I0201 08:06:18.808758 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:06:18 crc kubenswrapper[4650]: I0201 08:06:18.809762 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"499bf9f85d7da9f44cd71c55b9d8e9b0be7146cff5c621a8ec41c15a012dac71"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 08:06:18 crc kubenswrapper[4650]: I0201 08:06:18.809795 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:06:18 crc kubenswrapper[4650]: I0201 08:06:18.809831 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://499bf9f85d7da9f44cd71c55b9d8e9b0be7146cff5c621a8ec41c15a012dac71" gracePeriod=30 Feb 01 08:06:18 crc kubenswrapper[4650]: I0201 08:06:18.818133 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:06:19 crc kubenswrapper[4650]: E0201 08:06:19.153629 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:06:19 crc kubenswrapper[4650]: I0201 08:06:19.785949 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="499bf9f85d7da9f44cd71c55b9d8e9b0be7146cff5c621a8ec41c15a012dac71" exitCode=0 Feb 01 08:06:19 crc 
kubenswrapper[4650]: I0201 08:06:19.786010 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"499bf9f85d7da9f44cd71c55b9d8e9b0be7146cff5c621a8ec41c15a012dac71"} Feb 01 08:06:19 crc kubenswrapper[4650]: I0201 08:06:19.786065 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7"} Feb 01 08:06:19 crc kubenswrapper[4650]: I0201 08:06:19.786092 4650 scope.go:117] "RemoveContainer" containerID="b515683747e9413ecd5503ca038b95f5fee7abe63c4061c552c14c4a022378cb" Feb 01 08:06:19 crc kubenswrapper[4650]: I0201 08:06:19.786351 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:06:19 crc kubenswrapper[4650]: I0201 08:06:19.787169 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:06:19 crc kubenswrapper[4650]: E0201 08:06:19.788109 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:06:19 crc kubenswrapper[4650]: I0201 08:06:19.965766 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:06:19 crc kubenswrapper[4650]: I0201 08:06:19.966139 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:06:19 crc kubenswrapper[4650]: I0201 08:06:19.966162 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:06:19 crc kubenswrapper[4650]: I0201 08:06:19.966207 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:06:19 crc kubenswrapper[4650]: I0201 08:06:19.966215 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:06:19 crc kubenswrapper[4650]: E0201 08:06:19.966984 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:06:20 crc kubenswrapper[4650]: I0201 08:06:20.802987 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:06:20 crc kubenswrapper[4650]: E0201 08:06:20.803356 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:06:24 crc kubenswrapper[4650]: I0201 08:06:24.812645 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:06:24 crc kubenswrapper[4650]: I0201 08:06:24.813262 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:06:27 crc kubenswrapper[4650]: I0201 08:06:27.813527 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:06:29 crc kubenswrapper[4650]: I0201 08:06:29.808863 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:06:30 crc kubenswrapper[4650]: I0201 08:06:30.804515 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:06:30 crc kubenswrapper[4650]: I0201 08:06:30.804877 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:06:30 crc kubenswrapper[4650]: I0201 08:06:30.805920 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 08:06:30 crc kubenswrapper[4650]: I0201 08:06:30.805960 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:06:30 crc kubenswrapper[4650]: I0201 08:06:30.806003 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" gracePeriod=30 Feb 01 08:06:30 crc kubenswrapper[4650]: I0201 08:06:30.809053 4650 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:06:30 crc kubenswrapper[4650]: E0201 08:06:30.935012 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:06:31 crc kubenswrapper[4650]: I0201 08:06:31.923248 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" exitCode=0 Feb 01 08:06:31 crc kubenswrapper[4650]: I0201 08:06:31.923527 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7"} Feb 01 08:06:31 crc kubenswrapper[4650]: I0201 08:06:31.923563 4650 scope.go:117] "RemoveContainer" containerID="499bf9f85d7da9f44cd71c55b9d8e9b0be7146cff5c621a8ec41c15a012dac71" Feb 01 08:06:31 crc kubenswrapper[4650]: I0201 08:06:31.924206 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:06:31 crc kubenswrapper[4650]: I0201 08:06:31.924232 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:06:31 crc kubenswrapper[4650]: E0201 08:06:31.924439 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:06:32 crc kubenswrapper[4650]: I0201 08:06:32.968808 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:06:32 crc kubenswrapper[4650]: I0201 08:06:32.969369 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:06:32 crc kubenswrapper[4650]: I0201 08:06:32.969407 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:06:32 crc kubenswrapper[4650]: I0201 08:06:32.969520 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:06:32 crc kubenswrapper[4650]: I0201 08:06:32.969532 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:06:32 crc kubenswrapper[4650]: E0201 
08:06:32.970482 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:06:43 crc kubenswrapper[4650]: I0201 08:06:43.966731 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:06:43 crc kubenswrapper[4650]: I0201 08:06:43.967934 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:06:43 crc kubenswrapper[4650]: E0201 08:06:43.968774 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:06:47 crc kubenswrapper[4650]: I0201 08:06:47.967195 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:06:47 crc kubenswrapper[4650]: I0201 08:06:47.968152 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:06:47 crc kubenswrapper[4650]: I0201 08:06:47.968203 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:06:47 crc kubenswrapper[4650]: I0201 08:06:47.968297 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:06:47 crc kubenswrapper[4650]: I0201 08:06:47.968312 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:06:47 crc kubenswrapper[4650]: E0201 08:06:47.969152 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:06:57 crc kubenswrapper[4650]: E0201 08:06:57.373550 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 08:06:57 crc kubenswrapper[4650]: I0201 08:06:57.373406 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:06:57 crc kubenswrapper[4650]: E0201 08:06:57.375052 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 08:08:59.37501477 +0000 UTC m=+2738.098113015 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.615429 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2vq8v"] Feb 01 08:06:58 crc kubenswrapper[4650]: E0201 08:06:58.616302 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63d8a90d-b159-4c45-b6de-ea8ac2e267cf" containerName="registry-server" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.616318 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="63d8a90d-b159-4c45-b6de-ea8ac2e267cf" containerName="registry-server" Feb 01 08:06:58 crc kubenswrapper[4650]: E0201 08:06:58.616345 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63d8a90d-b159-4c45-b6de-ea8ac2e267cf" containerName="extract-utilities" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.616353 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="63d8a90d-b159-4c45-b6de-ea8ac2e267cf" containerName="extract-utilities" Feb 01 08:06:58 crc kubenswrapper[4650]: E0201 08:06:58.616370 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63d8a90d-b159-4c45-b6de-ea8ac2e267cf" containerName="extract-content" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.616380 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="63d8a90d-b159-4c45-b6de-ea8ac2e267cf" containerName="extract-content" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.616612 4650 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="63d8a90d-b159-4c45-b6de-ea8ac2e267cf" containerName="registry-server" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.618292 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.641719 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2vq8v"] Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.711331 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44585941-b571-4f28-aa95-f4b3e298b832-catalog-content\") pod \"redhat-marketplace-2vq8v\" (UID: \"44585941-b571-4f28-aa95-f4b3e298b832\") " pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.711540 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44585941-b571-4f28-aa95-f4b3e298b832-utilities\") pod \"redhat-marketplace-2vq8v\" (UID: \"44585941-b571-4f28-aa95-f4b3e298b832\") " pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.711597 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5brhl\" (UniqueName: \"kubernetes.io/projected/44585941-b571-4f28-aa95-f4b3e298b832-kube-api-access-5brhl\") pod \"redhat-marketplace-2vq8v\" (UID: \"44585941-b571-4f28-aa95-f4b3e298b832\") " pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.813735 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44585941-b571-4f28-aa95-f4b3e298b832-catalog-content\") pod \"redhat-marketplace-2vq8v\" (UID: \"44585941-b571-4f28-aa95-f4b3e298b832\") " pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.813791 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44585941-b571-4f28-aa95-f4b3e298b832-utilities\") pod \"redhat-marketplace-2vq8v\" (UID: \"44585941-b571-4f28-aa95-f4b3e298b832\") " pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.813839 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5brhl\" (UniqueName: \"kubernetes.io/projected/44585941-b571-4f28-aa95-f4b3e298b832-kube-api-access-5brhl\") pod \"redhat-marketplace-2vq8v\" (UID: \"44585941-b571-4f28-aa95-f4b3e298b832\") " pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.814337 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44585941-b571-4f28-aa95-f4b3e298b832-catalog-content\") pod \"redhat-marketplace-2vq8v\" (UID: \"44585941-b571-4f28-aa95-f4b3e298b832\") " pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.814380 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44585941-b571-4f28-aa95-f4b3e298b832-utilities\") pod \"redhat-marketplace-2vq8v\" (UID: 
\"44585941-b571-4f28-aa95-f4b3e298b832\") " pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.848959 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5brhl\" (UniqueName: \"kubernetes.io/projected/44585941-b571-4f28-aa95-f4b3e298b832-kube-api-access-5brhl\") pod \"redhat-marketplace-2vq8v\" (UID: \"44585941-b571-4f28-aa95-f4b3e298b832\") " pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.965017 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.965431 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:06:58 crc kubenswrapper[4650]: E0201 08:06:58.965720 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:06:58 crc kubenswrapper[4650]: I0201 08:06:58.984489 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:06:59 crc kubenswrapper[4650]: I0201 08:06:59.475643 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2vq8v"] Feb 01 08:06:59 crc kubenswrapper[4650]: I0201 08:06:59.966373 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:06:59 crc kubenswrapper[4650]: I0201 08:06:59.966824 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:06:59 crc kubenswrapper[4650]: I0201 08:06:59.966870 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:06:59 crc kubenswrapper[4650]: I0201 08:06:59.966965 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:06:59 crc kubenswrapper[4650]: I0201 08:06:59.966977 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:06:59 crc kubenswrapper[4650]: E0201 08:06:59.967715 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for 
\"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:07:00 crc kubenswrapper[4650]: I0201 08:07:00.239448 4650 generic.go:334] "Generic (PLEG): container finished" podID="44585941-b571-4f28-aa95-f4b3e298b832" containerID="657c64e2eff7b0469500053496741508e58052532edfbf8579066f3e67cb7032" exitCode=0 Feb 01 08:07:00 crc kubenswrapper[4650]: I0201 08:07:00.239491 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vq8v" event={"ID":"44585941-b571-4f28-aa95-f4b3e298b832","Type":"ContainerDied","Data":"657c64e2eff7b0469500053496741508e58052532edfbf8579066f3e67cb7032"} Feb 01 08:07:00 crc kubenswrapper[4650]: I0201 08:07:00.239517 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vq8v" event={"ID":"44585941-b571-4f28-aa95-f4b3e298b832","Type":"ContainerStarted","Data":"cf85bf49c31dcc867ee38239dbfc76f8b78f3124db01a2f9c64b85dbaa0fb360"} Feb 01 08:07:00 crc kubenswrapper[4650]: I0201 08:07:00.242018 4650 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 01 08:07:01 crc kubenswrapper[4650]: I0201 08:07:01.251088 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vq8v" event={"ID":"44585941-b571-4f28-aa95-f4b3e298b832","Type":"ContainerStarted","Data":"a1543a316c34d79e9d648761a0656a4bebcdfd0c5abf5ff95f1d965ff22f856c"} Feb 01 08:07:02 crc kubenswrapper[4650]: I0201 08:07:02.267592 4650 generic.go:334] "Generic (PLEG): container finished" podID="44585941-b571-4f28-aa95-f4b3e298b832" containerID="a1543a316c34d79e9d648761a0656a4bebcdfd0c5abf5ff95f1d965ff22f856c" exitCode=0 Feb 01 08:07:02 crc kubenswrapper[4650]: I0201 08:07:02.267707 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vq8v" event={"ID":"44585941-b571-4f28-aa95-f4b3e298b832","Type":"ContainerDied","Data":"a1543a316c34d79e9d648761a0656a4bebcdfd0c5abf5ff95f1d965ff22f856c"} Feb 01 08:07:03 crc kubenswrapper[4650]: I0201 08:07:03.277920 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vq8v" event={"ID":"44585941-b571-4f28-aa95-f4b3e298b832","Type":"ContainerStarted","Data":"1e85c263ceb94838c3006c82b0c6c750d29aec3bbee9e210129e2282a20afc25"} Feb 01 08:07:03 crc kubenswrapper[4650]: I0201 08:07:03.296947 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2vq8v" podStartSLOduration=2.881515385 podStartE2EDuration="5.296929627s" podCreationTimestamp="2026-02-01 08:06:58 +0000 UTC" firstStartedPulling="2026-02-01 08:07:00.241597687 +0000 UTC m=+2618.964695972" lastFinishedPulling="2026-02-01 08:07:02.657011969 +0000 UTC m=+2621.380110214" observedRunningTime="2026-02-01 08:07:03.294217706 +0000 UTC m=+2622.017315961" watchObservedRunningTime="2026-02-01 08:07:03.296929627 +0000 UTC m=+2622.020027882" Feb 01 08:07:08 crc kubenswrapper[4650]: I0201 08:07:08.985200 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:07:08 crc kubenswrapper[4650]: I0201 08:07:08.986089 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:07:09 crc kubenswrapper[4650]: E0201 08:07:09.025515 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 08:07:09 crc kubenswrapper[4650]: I0201 08:07:09.043447 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:07:09 crc kubenswrapper[4650]: I0201 08:07:09.330560 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:07:09 crc kubenswrapper[4650]: I0201 08:07:09.385131 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:07:09 crc kubenswrapper[4650]: I0201 08:07:09.460346 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2vq8v"] Feb 01 08:07:11 crc kubenswrapper[4650]: I0201 08:07:11.362616 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2vq8v" podUID="44585941-b571-4f28-aa95-f4b3e298b832" containerName="registry-server" containerID="cri-o://1e85c263ceb94838c3006c82b0c6c750d29aec3bbee9e210129e2282a20afc25" gracePeriod=2 Feb 01 08:07:11 crc kubenswrapper[4650]: I0201 08:07:11.972853 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:07:11 crc kubenswrapper[4650]: I0201 08:07:11.973210 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:07:11 crc kubenswrapper[4650]: I0201 08:07:11.973435 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:07:11 crc kubenswrapper[4650]: I0201 08:07:11.973504 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:07:11 crc kubenswrapper[4650]: I0201 08:07:11.973526 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:07:11 crc kubenswrapper[4650]: I0201 08:07:11.973571 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:07:11 crc kubenswrapper[4650]: I0201 08:07:11.973579 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:07:11 crc kubenswrapper[4650]: E0201 08:07:11.973699 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" 
podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.006213 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.136767 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44585941-b571-4f28-aa95-f4b3e298b832-catalog-content\") pod \"44585941-b571-4f28-aa95-f4b3e298b832\" (UID: \"44585941-b571-4f28-aa95-f4b3e298b832\") " Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.136923 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44585941-b571-4f28-aa95-f4b3e298b832-utilities\") pod \"44585941-b571-4f28-aa95-f4b3e298b832\" (UID: \"44585941-b571-4f28-aa95-f4b3e298b832\") " Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.136968 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5brhl\" (UniqueName: \"kubernetes.io/projected/44585941-b571-4f28-aa95-f4b3e298b832-kube-api-access-5brhl\") pod \"44585941-b571-4f28-aa95-f4b3e298b832\" (UID: \"44585941-b571-4f28-aa95-f4b3e298b832\") " Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.138423 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44585941-b571-4f28-aa95-f4b3e298b832-utilities" (OuterVolumeSpecName: "utilities") pod "44585941-b571-4f28-aa95-f4b3e298b832" (UID: "44585941-b571-4f28-aa95-f4b3e298b832"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.156325 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44585941-b571-4f28-aa95-f4b3e298b832-kube-api-access-5brhl" (OuterVolumeSpecName: "kube-api-access-5brhl") pod "44585941-b571-4f28-aa95-f4b3e298b832" (UID: "44585941-b571-4f28-aa95-f4b3e298b832"). InnerVolumeSpecName "kube-api-access-5brhl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.165095 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44585941-b571-4f28-aa95-f4b3e298b832-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "44585941-b571-4f28-aa95-f4b3e298b832" (UID: "44585941-b571-4f28-aa95-f4b3e298b832"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.239295 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44585941-b571-4f28-aa95-f4b3e298b832-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.239459 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5brhl\" (UniqueName: \"kubernetes.io/projected/44585941-b571-4f28-aa95-f4b3e298b832-kube-api-access-5brhl\") on node \"crc\" DevicePath \"\"" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.239519 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44585941-b571-4f28-aa95-f4b3e298b832-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.378253 4650 generic.go:334] "Generic (PLEG): container finished" podID="44585941-b571-4f28-aa95-f4b3e298b832" containerID="1e85c263ceb94838c3006c82b0c6c750d29aec3bbee9e210129e2282a20afc25" exitCode=0 Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.378349 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vq8v" event={"ID":"44585941-b571-4f28-aa95-f4b3e298b832","Type":"ContainerDied","Data":"1e85c263ceb94838c3006c82b0c6c750d29aec3bbee9e210129e2282a20afc25"} Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.378441 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vq8v" event={"ID":"44585941-b571-4f28-aa95-f4b3e298b832","Type":"ContainerDied","Data":"cf85bf49c31dcc867ee38239dbfc76f8b78f3124db01a2f9c64b85dbaa0fb360"} Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.378487 4650 scope.go:117] "RemoveContainer" containerID="1e85c263ceb94838c3006c82b0c6c750d29aec3bbee9e210129e2282a20afc25" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.378534 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2vq8v" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.389211 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6"} Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.396160 4650 scope.go:117] "RemoveContainer" containerID="a1543a316c34d79e9d648761a0656a4bebcdfd0c5abf5ff95f1d965ff22f856c" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.448842 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2vq8v"] Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.460563 4650 scope.go:117] "RemoveContainer" containerID="657c64e2eff7b0469500053496741508e58052532edfbf8579066f3e67cb7032" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.465852 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2vq8v"] Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.482453 4650 scope.go:117] "RemoveContainer" containerID="1e85c263ceb94838c3006c82b0c6c750d29aec3bbee9e210129e2282a20afc25" Feb 01 08:07:12 crc kubenswrapper[4650]: E0201 08:07:12.485648 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e85c263ceb94838c3006c82b0c6c750d29aec3bbee9e210129e2282a20afc25\": container with ID starting with 1e85c263ceb94838c3006c82b0c6c750d29aec3bbee9e210129e2282a20afc25 not found: ID does not exist" containerID="1e85c263ceb94838c3006c82b0c6c750d29aec3bbee9e210129e2282a20afc25" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.485688 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e85c263ceb94838c3006c82b0c6c750d29aec3bbee9e210129e2282a20afc25"} err="failed to get container status \"1e85c263ceb94838c3006c82b0c6c750d29aec3bbee9e210129e2282a20afc25\": rpc error: code = NotFound desc = could not find container \"1e85c263ceb94838c3006c82b0c6c750d29aec3bbee9e210129e2282a20afc25\": container with ID starting with 1e85c263ceb94838c3006c82b0c6c750d29aec3bbee9e210129e2282a20afc25 not found: ID does not exist" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.485733 4650 scope.go:117] "RemoveContainer" containerID="a1543a316c34d79e9d648761a0656a4bebcdfd0c5abf5ff95f1d965ff22f856c" Feb 01 08:07:12 crc kubenswrapper[4650]: E0201 08:07:12.486286 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1543a316c34d79e9d648761a0656a4bebcdfd0c5abf5ff95f1d965ff22f856c\": container with ID starting with a1543a316c34d79e9d648761a0656a4bebcdfd0c5abf5ff95f1d965ff22f856c not found: ID does not exist" containerID="a1543a316c34d79e9d648761a0656a4bebcdfd0c5abf5ff95f1d965ff22f856c" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.486308 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1543a316c34d79e9d648761a0656a4bebcdfd0c5abf5ff95f1d965ff22f856c"} err="failed to get container status \"a1543a316c34d79e9d648761a0656a4bebcdfd0c5abf5ff95f1d965ff22f856c\": rpc error: code = NotFound desc = could not find container \"a1543a316c34d79e9d648761a0656a4bebcdfd0c5abf5ff95f1d965ff22f856c\": container with ID starting with a1543a316c34d79e9d648761a0656a4bebcdfd0c5abf5ff95f1d965ff22f856c not found: ID does not 
exist" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.486356 4650 scope.go:117] "RemoveContainer" containerID="657c64e2eff7b0469500053496741508e58052532edfbf8579066f3e67cb7032" Feb 01 08:07:12 crc kubenswrapper[4650]: E0201 08:07:12.486690 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"657c64e2eff7b0469500053496741508e58052532edfbf8579066f3e67cb7032\": container with ID starting with 657c64e2eff7b0469500053496741508e58052532edfbf8579066f3e67cb7032 not found: ID does not exist" containerID="657c64e2eff7b0469500053496741508e58052532edfbf8579066f3e67cb7032" Feb 01 08:07:12 crc kubenswrapper[4650]: I0201 08:07:12.486711 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"657c64e2eff7b0469500053496741508e58052532edfbf8579066f3e67cb7032"} err="failed to get container status \"657c64e2eff7b0469500053496741508e58052532edfbf8579066f3e67cb7032\": rpc error: code = NotFound desc = could not find container \"657c64e2eff7b0469500053496741508e58052532edfbf8579066f3e67cb7032\": container with ID starting with 657c64e2eff7b0469500053496741508e58052532edfbf8579066f3e67cb7032 not found: ID does not exist" Feb 01 08:07:12 crc kubenswrapper[4650]: E0201 08:07:12.823585 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.420494 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" exitCode=1 Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.420569 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" exitCode=1 Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.420590 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" exitCode=1 Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.420629 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6"} Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.420682 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818"} Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.420712 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0"} Feb 01 08:07:13 crc 
kubenswrapper[4650]: I0201 08:07:13.420746 4650 scope.go:117] "RemoveContainer" containerID="1acda61aef78a5049db62501f367693156f68a977cc1a8e0e9ddfcec93410143" Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.423934 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.424176 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.424260 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.424392 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.424413 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:07:13 crc kubenswrapper[4650]: E0201 08:07:13.425446 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.504903 4650 scope.go:117] "RemoveContainer" containerID="b6126e03d202471593a8c420e3b66d9d2db06907b1d44b8cc78801f28523c0df" Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.569695 4650 scope.go:117] "RemoveContainer" containerID="a05d96fd2dfa019fd74397dd31098076ac6c371940f7dca9e6addc0515f292cb" Feb 01 08:07:13 crc kubenswrapper[4650]: I0201 08:07:13.983079 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44585941-b571-4f28-aa95-f4b3e298b832" path="/var/lib/kubelet/pods/44585941-b571-4f28-aa95-f4b3e298b832/volumes" Feb 01 08:07:14 crc kubenswrapper[4650]: I0201 08:07:14.439948 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:07:14 crc kubenswrapper[4650]: I0201 08:07:14.440207 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:07:14 crc kubenswrapper[4650]: I0201 08:07:14.440256 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:07:14 crc kubenswrapper[4650]: I0201 08:07:14.440478 4650 scope.go:117] "RemoveContainer" 
containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:07:14 crc kubenswrapper[4650]: I0201 08:07:14.440535 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:07:14 crc kubenswrapper[4650]: E0201 08:07:14.441798 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:07:22 crc kubenswrapper[4650]: I0201 08:07:22.964937 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:07:22 crc kubenswrapper[4650]: I0201 08:07:22.965507 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:07:22 crc kubenswrapper[4650]: E0201 08:07:22.965706 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:07:25 crc kubenswrapper[4650]: I0201 08:07:25.966097 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:07:25 crc kubenswrapper[4650]: I0201 08:07:25.966477 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:07:25 crc kubenswrapper[4650]: I0201 08:07:25.966501 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:07:25 crc kubenswrapper[4650]: I0201 08:07:25.966559 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:07:25 crc kubenswrapper[4650]: I0201 08:07:25.966572 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:07:26 crc kubenswrapper[4650]: E0201 08:07:26.202989 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to 
\"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:07:26 crc kubenswrapper[4650]: I0201 08:07:26.556146 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1"} Feb 01 08:07:26 crc kubenswrapper[4650]: I0201 08:07:26.556748 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:07:26 crc kubenswrapper[4650]: I0201 08:07:26.556846 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:07:26 crc kubenswrapper[4650]: I0201 08:07:26.556889 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:07:26 crc kubenswrapper[4650]: I0201 08:07:26.556982 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:07:26 crc kubenswrapper[4650]: E0201 08:07:26.557453 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:07:35 crc kubenswrapper[4650]: I0201 08:07:35.965375 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:07:35 crc kubenswrapper[4650]: I0201 08:07:35.965783 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:07:35 crc kubenswrapper[4650]: E0201 08:07:35.966014 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:07:37 crc kubenswrapper[4650]: I0201 08:07:37.161444 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 08:07:37 crc kubenswrapper[4650]: I0201 08:07:37.161502 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 08:07:38 crc kubenswrapper[4650]: I0201 08:07:38.969365 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:07:38 crc kubenswrapper[4650]: I0201 08:07:38.971934 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:07:38 crc kubenswrapper[4650]: I0201 08:07:38.972310 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:07:38 crc kubenswrapper[4650]: I0201 08:07:38.972718 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:07:38 crc kubenswrapper[4650]: E0201 08:07:38.973696 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:07:50 crc kubenswrapper[4650]: I0201 08:07:50.965493 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:07:50 crc kubenswrapper[4650]: I0201 08:07:50.966012 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:07:50 crc kubenswrapper[4650]: E0201 08:07:50.966289 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd 
pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:07:50 crc kubenswrapper[4650]: I0201 08:07:50.966660 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:07:50 crc kubenswrapper[4650]: I0201 08:07:50.966848 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:07:50 crc kubenswrapper[4650]: I0201 08:07:50.966918 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:07:50 crc kubenswrapper[4650]: I0201 08:07:50.967166 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:07:50 crc kubenswrapper[4650]: E0201 08:07:50.967874 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:08:03 crc kubenswrapper[4650]: I0201 08:08:03.966314 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:08:03 crc kubenswrapper[4650]: I0201 08:08:03.966926 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:08:03 crc kubenswrapper[4650]: I0201 08:08:03.967436 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:08:03 crc kubenswrapper[4650]: E0201 08:08:03.967438 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:08:03 crc kubenswrapper[4650]: I0201 08:08:03.967587 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:08:03 crc 
kubenswrapper[4650]: I0201 08:08:03.967650 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:08:03 crc kubenswrapper[4650]: I0201 08:08:03.967813 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:08:03 crc kubenswrapper[4650]: E0201 08:08:03.968693 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:08:07 crc kubenswrapper[4650]: I0201 08:08:07.161520 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 08:08:07 crc kubenswrapper[4650]: I0201 08:08:07.162062 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 08:08:16 crc kubenswrapper[4650]: I0201 08:08:16.965645 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:08:16 crc kubenswrapper[4650]: I0201 08:08:16.967159 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:08:16 crc kubenswrapper[4650]: E0201 08:08:16.967550 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:08:18 crc kubenswrapper[4650]: I0201 08:08:18.965995 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:08:18 crc kubenswrapper[4650]: I0201 08:08:18.966086 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:08:18 crc kubenswrapper[4650]: I0201 
08:08:18.966111 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:08:18 crc kubenswrapper[4650]: I0201 08:08:18.966179 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:08:18 crc kubenswrapper[4650]: E0201 08:08:18.966567 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:08:29 crc kubenswrapper[4650]: I0201 08:08:29.966720 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:08:29 crc kubenswrapper[4650]: I0201 08:08:29.969206 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:08:29 crc kubenswrapper[4650]: I0201 08:08:29.969481 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:08:29 crc kubenswrapper[4650]: I0201 08:08:29.969752 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:08:29 crc kubenswrapper[4650]: E0201 08:08:29.970536 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:08:31 crc kubenswrapper[4650]: I0201 08:08:31.972385 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:08:31 crc kubenswrapper[4650]: I0201 08:08:31.972765 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:08:31 crc kubenswrapper[4650]: E0201 08:08:31.973173 4650 
pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:08:37 crc kubenswrapper[4650]: I0201 08:08:37.161834 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 08:08:37 crc kubenswrapper[4650]: I0201 08:08:37.162442 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 08:08:37 crc kubenswrapper[4650]: I0201 08:08:37.162496 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 08:08:37 crc kubenswrapper[4650]: I0201 08:08:37.163287 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"602c56a59dd65049f4631ec9296207f999808cfcf7cd5f0739471580c8fa001e"} pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 01 08:08:37 crc kubenswrapper[4650]: I0201 08:08:37.163347 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" containerID="cri-o://602c56a59dd65049f4631ec9296207f999808cfcf7cd5f0739471580c8fa001e" gracePeriod=600 Feb 01 08:08:38 crc kubenswrapper[4650]: I0201 08:08:38.250885 4650 generic.go:334] "Generic (PLEG): container finished" podID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerID="602c56a59dd65049f4631ec9296207f999808cfcf7cd5f0739471580c8fa001e" exitCode=0 Feb 01 08:08:38 crc kubenswrapper[4650]: I0201 08:08:38.251057 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerDied","Data":"602c56a59dd65049f4631ec9296207f999808cfcf7cd5f0739471580c8fa001e"} Feb 01 08:08:38 crc kubenswrapper[4650]: I0201 08:08:38.251465 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5"} Feb 01 08:08:38 crc kubenswrapper[4650]: I0201 08:08:38.251493 4650 scope.go:117] "RemoveContainer" containerID="a3bc1127fd28499a6f9dca9b6dc82031712e64f5a10f18bcbd4d1f3422ef908f" Feb 01 08:08:42 crc kubenswrapper[4650]: I0201 08:08:42.966296 4650 scope.go:117] 
"RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:08:42 crc kubenswrapper[4650]: I0201 08:08:42.967048 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:08:42 crc kubenswrapper[4650]: E0201 08:08:42.967471 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:08:44 crc kubenswrapper[4650]: I0201 08:08:44.964838 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:08:44 crc kubenswrapper[4650]: I0201 08:08:44.965185 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:08:44 crc kubenswrapper[4650]: I0201 08:08:44.965206 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:08:44 crc kubenswrapper[4650]: I0201 08:08:44.965261 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:08:45 crc kubenswrapper[4650]: E0201 08:08:45.166535 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:08:45 crc kubenswrapper[4650]: I0201 08:08:45.316812 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83"} Feb 01 08:08:45 crc kubenswrapper[4650]: I0201 08:08:45.317727 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:08:45 crc kubenswrapper[4650]: I0201 08:08:45.317809 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:08:45 crc kubenswrapper[4650]: I0201 08:08:45.317940 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:08:45 crc kubenswrapper[4650]: E0201 08:08:45.318430 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:08:55 crc kubenswrapper[4650]: I0201 08:08:55.966877 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:08:55 crc kubenswrapper[4650]: I0201 08:08:55.967423 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:08:55 crc kubenswrapper[4650]: E0201 08:08:55.967725 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:08:57 crc kubenswrapper[4650]: I0201 08:08:57.966379 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:08:57 crc kubenswrapper[4650]: I0201 08:08:57.966797 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:08:57 crc kubenswrapper[4650]: I0201 08:08:57.966982 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:08:57 crc kubenswrapper[4650]: E0201 08:08:57.967552 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:08:59 crc kubenswrapper[4650]: I0201 08:08:59.411793 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:08:59 crc kubenswrapper[4650]: E0201 08:08:59.412310 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not 
found Feb 01 08:08:59 crc kubenswrapper[4650]: E0201 08:08:59.412368 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 08:11:01.412348719 +0000 UTC m=+2860.135446974 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 08:09:06 crc kubenswrapper[4650]: I0201 08:09:06.965899 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:09:06 crc kubenswrapper[4650]: I0201 08:09:06.966588 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:09:07 crc kubenswrapper[4650]: E0201 08:09:07.225801 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:09:07 crc kubenswrapper[4650]: I0201 08:09:07.556669 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d"} Feb 01 08:09:07 crc kubenswrapper[4650]: I0201 08:09:07.556915 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:09:07 crc kubenswrapper[4650]: I0201 08:09:07.557368 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:09:07 crc kubenswrapper[4650]: E0201 08:09:07.557591 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:09:08 crc kubenswrapper[4650]: I0201 08:09:08.568486 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" exitCode=1 Feb 01 08:09:08 crc kubenswrapper[4650]: I0201 08:09:08.568565 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d"} Feb 01 08:09:08 crc kubenswrapper[4650]: I0201 08:09:08.568872 4650 scope.go:117] "RemoveContainer" containerID="8138cd45909f82507193e49962330ace75ad66553d15673e33518ad8320a42bd" Feb 01 08:09:08 crc kubenswrapper[4650]: I0201 08:09:08.569286 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:09:08 crc kubenswrapper[4650]: I0201 08:09:08.569313 4650 
scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:09:08 crc kubenswrapper[4650]: E0201 08:09:08.569737 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:09:09 crc kubenswrapper[4650]: I0201 08:09:09.583538 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:09:09 crc kubenswrapper[4650]: I0201 08:09:09.583575 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:09:09 crc kubenswrapper[4650]: E0201 08:09:09.584066 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:09:09 crc kubenswrapper[4650]: I0201 08:09:09.800496 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:09:10 crc kubenswrapper[4650]: I0201 08:09:10.597299 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:09:10 crc kubenswrapper[4650]: I0201 08:09:10.597334 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:09:10 crc kubenswrapper[4650]: E0201 08:09:10.597733 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:09:11 crc kubenswrapper[4650]: I0201 08:09:11.971100 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:09:11 crc kubenswrapper[4650]: I0201 08:09:11.971644 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:09:11 crc kubenswrapper[4650]: I0201 08:09:11.971756 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:09:11 crc kubenswrapper[4650]: E0201 08:09:11.972082 4650 
pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:09:12 crc kubenswrapper[4650]: E0201 08:09:12.332833 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 08:09:12 crc kubenswrapper[4650]: I0201 08:09:12.608752 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:09:21 crc kubenswrapper[4650]: I0201 08:09:21.979494 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:09:21 crc kubenswrapper[4650]: I0201 08:09:21.980131 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:09:21 crc kubenswrapper[4650]: E0201 08:09:21.980848 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:09:26 crc kubenswrapper[4650]: I0201 08:09:26.965118 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:09:26 crc kubenswrapper[4650]: I0201 08:09:26.965720 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:09:26 crc kubenswrapper[4650]: I0201 08:09:26.965811 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:09:26 crc kubenswrapper[4650]: E0201 08:09:26.966096 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:09:32 crc kubenswrapper[4650]: I0201 08:09:32.966165 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:09:32 crc kubenswrapper[4650]: I0201 08:09:32.966811 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:09:32 crc kubenswrapper[4650]: E0201 08:09:32.967284 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:09:41 crc kubenswrapper[4650]: I0201 08:09:41.974735 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:09:41 crc kubenswrapper[4650]: I0201 08:09:41.975490 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:09:41 crc kubenswrapper[4650]: I0201 08:09:41.975646 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:09:41 crc kubenswrapper[4650]: E0201 08:09:41.976226 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:09:44 crc kubenswrapper[4650]: I0201 08:09:44.965164 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:09:44 crc kubenswrapper[4650]: I0201 08:09:44.965810 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:09:44 crc kubenswrapper[4650]: E0201 08:09:44.966170 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:09:54 crc 
kubenswrapper[4650]: I0201 08:09:54.966016 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:09:54 crc kubenswrapper[4650]: I0201 08:09:54.966716 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:09:54 crc kubenswrapper[4650]: I0201 08:09:54.966816 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:09:54 crc kubenswrapper[4650]: E0201 08:09:54.967184 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:09:57 crc kubenswrapper[4650]: I0201 08:09:57.006855 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" exitCode=1 Feb 01 08:09:57 crc kubenswrapper[4650]: I0201 08:09:57.006890 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83"} Feb 01 08:09:57 crc kubenswrapper[4650]: I0201 08:09:57.007174 4650 scope.go:117] "RemoveContainer" containerID="5ed089f018fae196de6b5e8ce187429dc3d7d82512f17a01c278620c21f6fad0" Feb 01 08:09:57 crc kubenswrapper[4650]: I0201 08:09:57.009359 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:09:57 crc kubenswrapper[4650]: I0201 08:09:57.009441 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:09:57 crc kubenswrapper[4650]: I0201 08:09:57.009467 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:09:57 crc kubenswrapper[4650]: I0201 08:09:57.009553 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:09:57 crc kubenswrapper[4650]: E0201 08:09:57.011627 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:09:58 crc kubenswrapper[4650]: I0201 08:09:58.964968 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:09:58 crc kubenswrapper[4650]: I0201 08:09:58.965276 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:09:58 crc kubenswrapper[4650]: E0201 08:09:58.965560 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:10:09 crc kubenswrapper[4650]: I0201 08:10:09.977187 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:10:09 crc kubenswrapper[4650]: I0201 08:10:09.980104 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:10:09 crc kubenswrapper[4650]: I0201 08:10:09.980338 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:10:09 crc kubenswrapper[4650]: I0201 08:10:09.980626 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:10:09 crc kubenswrapper[4650]: E0201 08:10:09.984651 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:10:10 crc kubenswrapper[4650]: I0201 08:10:10.966831 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:10:10 crc kubenswrapper[4650]: I0201 08:10:10.967325 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:10:10 crc kubenswrapper[4650]: E0201 08:10:10.967982 4650 
pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:10:21 crc kubenswrapper[4650]: I0201 08:10:21.979822 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:10:21 crc kubenswrapper[4650]: I0201 08:10:21.980490 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:10:21 crc kubenswrapper[4650]: E0201 08:10:21.980951 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:10:22 crc kubenswrapper[4650]: I0201 08:10:22.966701 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:10:22 crc kubenswrapper[4650]: I0201 08:10:22.966798 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:10:22 crc kubenswrapper[4650]: I0201 08:10:22.966832 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:10:22 crc kubenswrapper[4650]: I0201 08:10:22.966920 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:10:22 crc kubenswrapper[4650]: E0201 08:10:22.967428 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:10:29 crc kubenswrapper[4650]: I0201 08:10:29.898582 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-bzzm4/must-gather-zc9sw"] Feb 01 08:10:29 crc kubenswrapper[4650]: 
E0201 08:10:29.900144 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44585941-b571-4f28-aa95-f4b3e298b832" containerName="registry-server" Feb 01 08:10:29 crc kubenswrapper[4650]: I0201 08:10:29.900235 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="44585941-b571-4f28-aa95-f4b3e298b832" containerName="registry-server" Feb 01 08:10:29 crc kubenswrapper[4650]: E0201 08:10:29.900307 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44585941-b571-4f28-aa95-f4b3e298b832" containerName="extract-utilities" Feb 01 08:10:29 crc kubenswrapper[4650]: I0201 08:10:29.900370 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="44585941-b571-4f28-aa95-f4b3e298b832" containerName="extract-utilities" Feb 01 08:10:29 crc kubenswrapper[4650]: E0201 08:10:29.900430 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44585941-b571-4f28-aa95-f4b3e298b832" containerName="extract-content" Feb 01 08:10:29 crc kubenswrapper[4650]: I0201 08:10:29.900480 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="44585941-b571-4f28-aa95-f4b3e298b832" containerName="extract-content" Feb 01 08:10:29 crc kubenswrapper[4650]: I0201 08:10:29.900711 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="44585941-b571-4f28-aa95-f4b3e298b832" containerName="registry-server" Feb 01 08:10:29 crc kubenswrapper[4650]: I0201 08:10:29.901720 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzzm4/must-gather-zc9sw" Feb 01 08:10:29 crc kubenswrapper[4650]: I0201 08:10:29.904480 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-bzzm4"/"kube-root-ca.crt" Feb 01 08:10:29 crc kubenswrapper[4650]: I0201 08:10:29.907206 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-bzzm4"/"openshift-service-ca.crt" Feb 01 08:10:29 crc kubenswrapper[4650]: I0201 08:10:29.919564 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-bzzm4/must-gather-zc9sw"] Feb 01 08:10:29 crc kubenswrapper[4650]: I0201 08:10:29.955854 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hbcm\" (UniqueName: \"kubernetes.io/projected/c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd-kube-api-access-5hbcm\") pod \"must-gather-zc9sw\" (UID: \"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd\") " pod="openshift-must-gather-bzzm4/must-gather-zc9sw" Feb 01 08:10:29 crc kubenswrapper[4650]: I0201 08:10:29.955912 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd-must-gather-output\") pod \"must-gather-zc9sw\" (UID: \"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd\") " pod="openshift-must-gather-bzzm4/must-gather-zc9sw" Feb 01 08:10:30 crc kubenswrapper[4650]: I0201 08:10:30.057849 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hbcm\" (UniqueName: \"kubernetes.io/projected/c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd-kube-api-access-5hbcm\") pod \"must-gather-zc9sw\" (UID: \"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd\") " pod="openshift-must-gather-bzzm4/must-gather-zc9sw" Feb 01 08:10:30 crc kubenswrapper[4650]: I0201 08:10:30.058260 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: 
\"kubernetes.io/empty-dir/c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd-must-gather-output\") pod \"must-gather-zc9sw\" (UID: \"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd\") " pod="openshift-must-gather-bzzm4/must-gather-zc9sw" Feb 01 08:10:30 crc kubenswrapper[4650]: I0201 08:10:30.058647 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd-must-gather-output\") pod \"must-gather-zc9sw\" (UID: \"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd\") " pod="openshift-must-gather-bzzm4/must-gather-zc9sw" Feb 01 08:10:30 crc kubenswrapper[4650]: I0201 08:10:30.105652 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hbcm\" (UniqueName: \"kubernetes.io/projected/c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd-kube-api-access-5hbcm\") pod \"must-gather-zc9sw\" (UID: \"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd\") " pod="openshift-must-gather-bzzm4/must-gather-zc9sw" Feb 01 08:10:30 crc kubenswrapper[4650]: I0201 08:10:30.221155 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzzm4/must-gather-zc9sw" Feb 01 08:10:30 crc kubenswrapper[4650]: I0201 08:10:30.846247 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-bzzm4/must-gather-zc9sw"] Feb 01 08:10:31 crc kubenswrapper[4650]: I0201 08:10:31.347442 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzzm4/must-gather-zc9sw" event={"ID":"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd","Type":"ContainerStarted","Data":"cdd2f3a819dce29e2a78934922bc44e5359dfca29722fba6c5c736e00e957a31"} Feb 01 08:10:33 crc kubenswrapper[4650]: I0201 08:10:33.967526 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:10:33 crc kubenswrapper[4650]: I0201 08:10:33.968491 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:10:33 crc kubenswrapper[4650]: E0201 08:10:33.969603 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:10:36 crc kubenswrapper[4650]: I0201 08:10:36.391591 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzzm4/must-gather-zc9sw" event={"ID":"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd","Type":"ContainerStarted","Data":"ff712753a0593b2cb91375f8c67294688258203e97fa63e2471a610c73164a7f"} Feb 01 08:10:36 crc kubenswrapper[4650]: I0201 08:10:36.392220 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzzm4/must-gather-zc9sw" event={"ID":"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd","Type":"ContainerStarted","Data":"e413101f86d5c585facc8dd355e8a9db57b708b7c0408ed3f7dbfe0d300ab41d"} Feb 01 08:10:36 crc kubenswrapper[4650]: I0201 08:10:36.420899 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-bzzm4/must-gather-zc9sw" podStartSLOduration=3.060459357 
podStartE2EDuration="7.420874461s" podCreationTimestamp="2026-02-01 08:10:29 +0000 UTC" firstStartedPulling="2026-02-01 08:10:30.847084767 +0000 UTC m=+2829.570183012" lastFinishedPulling="2026-02-01 08:10:35.207499871 +0000 UTC m=+2833.930598116" observedRunningTime="2026-02-01 08:10:36.409197042 +0000 UTC m=+2835.132295307" watchObservedRunningTime="2026-02-01 08:10:36.420874461 +0000 UTC m=+2835.143972706" Feb 01 08:10:37 crc kubenswrapper[4650]: I0201 08:10:37.161692 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 08:10:37 crc kubenswrapper[4650]: I0201 08:10:37.161767 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 08:10:37 crc kubenswrapper[4650]: I0201 08:10:37.967129 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:10:37 crc kubenswrapper[4650]: I0201 08:10:37.967223 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:10:37 crc kubenswrapper[4650]: I0201 08:10:37.967258 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:10:37 crc kubenswrapper[4650]: I0201 08:10:37.967357 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:10:37 crc kubenswrapper[4650]: E0201 08:10:37.967740 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:10:39 crc kubenswrapper[4650]: E0201 08:10:39.714639 4650 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.51:44938->38.102.83.51:34575: write tcp 38.102.83.51:44938->38.102.83.51:34575: write: connection reset by peer Feb 01 08:10:40 crc kubenswrapper[4650]: I0201 08:10:40.902729 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-bzzm4/crc-debug-tshnl"] Feb 01 08:10:40 crc kubenswrapper[4650]: I0201 08:10:40.904716 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bzzm4/crc-debug-tshnl" Feb 01 08:10:40 crc kubenswrapper[4650]: I0201 08:10:40.906397 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-bzzm4"/"default-dockercfg-jk8sw" Feb 01 08:10:40 crc kubenswrapper[4650]: I0201 08:10:40.974675 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/de30a1c5-facc-4c98-99dc-88b0f7cee234-host\") pod \"crc-debug-tshnl\" (UID: \"de30a1c5-facc-4c98-99dc-88b0f7cee234\") " pod="openshift-must-gather-bzzm4/crc-debug-tshnl" Feb 01 08:10:40 crc kubenswrapper[4650]: I0201 08:10:40.974810 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkm2p\" (UniqueName: \"kubernetes.io/projected/de30a1c5-facc-4c98-99dc-88b0f7cee234-kube-api-access-lkm2p\") pod \"crc-debug-tshnl\" (UID: \"de30a1c5-facc-4c98-99dc-88b0f7cee234\") " pod="openshift-must-gather-bzzm4/crc-debug-tshnl" Feb 01 08:10:41 crc kubenswrapper[4650]: I0201 08:10:41.076929 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkm2p\" (UniqueName: \"kubernetes.io/projected/de30a1c5-facc-4c98-99dc-88b0f7cee234-kube-api-access-lkm2p\") pod \"crc-debug-tshnl\" (UID: \"de30a1c5-facc-4c98-99dc-88b0f7cee234\") " pod="openshift-must-gather-bzzm4/crc-debug-tshnl" Feb 01 08:10:41 crc kubenswrapper[4650]: I0201 08:10:41.077483 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/de30a1c5-facc-4c98-99dc-88b0f7cee234-host\") pod \"crc-debug-tshnl\" (UID: \"de30a1c5-facc-4c98-99dc-88b0f7cee234\") " pod="openshift-must-gather-bzzm4/crc-debug-tshnl" Feb 01 08:10:41 crc kubenswrapper[4650]: I0201 08:10:41.077641 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/de30a1c5-facc-4c98-99dc-88b0f7cee234-host\") pod \"crc-debug-tshnl\" (UID: \"de30a1c5-facc-4c98-99dc-88b0f7cee234\") " pod="openshift-must-gather-bzzm4/crc-debug-tshnl" Feb 01 08:10:41 crc kubenswrapper[4650]: I0201 08:10:41.107050 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkm2p\" (UniqueName: \"kubernetes.io/projected/de30a1c5-facc-4c98-99dc-88b0f7cee234-kube-api-access-lkm2p\") pod \"crc-debug-tshnl\" (UID: \"de30a1c5-facc-4c98-99dc-88b0f7cee234\") " pod="openshift-must-gather-bzzm4/crc-debug-tshnl" Feb 01 08:10:41 crc kubenswrapper[4650]: I0201 08:10:41.227535 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bzzm4/crc-debug-tshnl" Feb 01 08:10:41 crc kubenswrapper[4650]: I0201 08:10:41.430998 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzzm4/crc-debug-tshnl" event={"ID":"de30a1c5-facc-4c98-99dc-88b0f7cee234","Type":"ContainerStarted","Data":"3efd558d801fd40945d6a3995667a65a7726b6bde1b4d58850d7fe90b7bcdc9b"} Feb 01 08:10:48 crc kubenswrapper[4650]: I0201 08:10:48.966434 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:10:48 crc kubenswrapper[4650]: I0201 08:10:48.966972 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:10:48 crc kubenswrapper[4650]: E0201 08:10:48.967186 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:10:51 crc kubenswrapper[4650]: I0201 08:10:51.973483 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:10:51 crc kubenswrapper[4650]: I0201 08:10:51.974071 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:10:51 crc kubenswrapper[4650]: I0201 08:10:51.974097 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:10:51 crc kubenswrapper[4650]: I0201 08:10:51.974154 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:10:51 crc kubenswrapper[4650]: E0201 08:10:51.974553 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.548878 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzzm4/crc-debug-tshnl" event={"ID":"de30a1c5-facc-4c98-99dc-88b0f7cee234","Type":"ContainerStarted","Data":"1d6248bdf05756a498df5887054790196f1a0e0eb8af2c4bc452cda0fe6188b8"} Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.573528 4650 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-bzzm4/crc-debug-tshnl" podStartSLOduration=2.217713329 podStartE2EDuration="15.573510978s" podCreationTimestamp="2026-02-01 08:10:40 +0000 UTC" firstStartedPulling="2026-02-01 08:10:41.25368376 +0000 UTC m=+2839.976782005" lastFinishedPulling="2026-02-01 08:10:54.609481409 +0000 UTC m=+2853.332579654" observedRunningTime="2026-02-01 08:10:55.570698343 +0000 UTC m=+2854.293796588" watchObservedRunningTime="2026-02-01 08:10:55.573510978 +0000 UTC m=+2854.296609223" Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.662786 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-h8xs4"] Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.664627 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.682803 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h8xs4"] Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.749381 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7fgj\" (UniqueName: \"kubernetes.io/projected/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-kube-api-access-h7fgj\") pod \"redhat-operators-h8xs4\" (UID: \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\") " pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.749721 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-utilities\") pod \"redhat-operators-h8xs4\" (UID: \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\") " pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.750135 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-catalog-content\") pod \"redhat-operators-h8xs4\" (UID: \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\") " pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.851813 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7fgj\" (UniqueName: \"kubernetes.io/projected/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-kube-api-access-h7fgj\") pod \"redhat-operators-h8xs4\" (UID: \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\") " pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.852185 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-utilities\") pod \"redhat-operators-h8xs4\" (UID: \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\") " pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.852396 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-catalog-content\") pod \"redhat-operators-h8xs4\" (UID: \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\") " pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 
08:10:55.852823 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-utilities\") pod \"redhat-operators-h8xs4\" (UID: \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\") " pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.853130 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-catalog-content\") pod \"redhat-operators-h8xs4\" (UID: \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\") " pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.885833 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7fgj\" (UniqueName: \"kubernetes.io/projected/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-kube-api-access-h7fgj\") pod \"redhat-operators-h8xs4\" (UID: \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\") " pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:10:55 crc kubenswrapper[4650]: I0201 08:10:55.983515 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:10:56 crc kubenswrapper[4650]: I0201 08:10:56.598810 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h8xs4"] Feb 01 08:10:56 crc kubenswrapper[4650]: W0201 08:10:56.617209 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4ea2a73_8081_43e2_b8d4_9913f624e2cb.slice/crio-756892cce454279ab32773ba595253b1ee83dc602f62eecc994e77b126718ecb WatchSource:0}: Error finding container 756892cce454279ab32773ba595253b1ee83dc602f62eecc994e77b126718ecb: Status 404 returned error can't find the container with id 756892cce454279ab32773ba595253b1ee83dc602f62eecc994e77b126718ecb Feb 01 08:10:57 crc kubenswrapper[4650]: I0201 08:10:57.583043 4650 generic.go:334] "Generic (PLEG): container finished" podID="f4ea2a73-8081-43e2-b8d4-9913f624e2cb" containerID="e2439f34aa2451eaf15610b1ac9555294f4c72067fed2413ab5bd4afa9281cc6" exitCode=0 Feb 01 08:10:57 crc kubenswrapper[4650]: I0201 08:10:57.584885 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8xs4" event={"ID":"f4ea2a73-8081-43e2-b8d4-9913f624e2cb","Type":"ContainerDied","Data":"e2439f34aa2451eaf15610b1ac9555294f4c72067fed2413ab5bd4afa9281cc6"} Feb 01 08:10:57 crc kubenswrapper[4650]: I0201 08:10:57.585053 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8xs4" event={"ID":"f4ea2a73-8081-43e2-b8d4-9913f624e2cb","Type":"ContainerStarted","Data":"756892cce454279ab32773ba595253b1ee83dc602f62eecc994e77b126718ecb"} Feb 01 08:10:58 crc kubenswrapper[4650]: I0201 08:10:58.593283 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8xs4" event={"ID":"f4ea2a73-8081-43e2-b8d4-9913f624e2cb","Type":"ContainerStarted","Data":"04c536f0c0a4de0f75b68cf20b7d276882d7367a0a0f256bf637f5c5dd0ed7cc"} Feb 01 08:11:00 crc kubenswrapper[4650]: I0201 08:11:00.965418 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:11:00 crc kubenswrapper[4650]: I0201 08:11:00.965974 4650 scope.go:117] "RemoveContainer" 
containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:11:00 crc kubenswrapper[4650]: E0201 08:11:00.966193 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:11:01 crc kubenswrapper[4650]: I0201 08:11:01.463465 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:11:01 crc kubenswrapper[4650]: E0201 08:11:01.463816 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 08:11:01 crc kubenswrapper[4650]: E0201 08:11:01.463891 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 08:13:03.463872189 +0000 UTC m=+2982.186970434 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 08:11:04 crc kubenswrapper[4650]: I0201 08:11:04.966012 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:11:04 crc kubenswrapper[4650]: I0201 08:11:04.966631 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:11:04 crc kubenswrapper[4650]: I0201 08:11:04.966660 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:11:04 crc kubenswrapper[4650]: I0201 08:11:04.966722 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:11:04 crc kubenswrapper[4650]: E0201 08:11:04.969858 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:11:07 crc kubenswrapper[4650]: I0201 08:11:07.160832 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 08:11:07 crc kubenswrapper[4650]: I0201 08:11:07.161971 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 08:11:09 crc kubenswrapper[4650]: I0201 08:11:09.681073 4650 generic.go:334] "Generic (PLEG): container finished" podID="f4ea2a73-8081-43e2-b8d4-9913f624e2cb" containerID="04c536f0c0a4de0f75b68cf20b7d276882d7367a0a0f256bf637f5c5dd0ed7cc" exitCode=0 Feb 01 08:11:09 crc kubenswrapper[4650]: I0201 08:11:09.681146 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8xs4" event={"ID":"f4ea2a73-8081-43e2-b8d4-9913f624e2cb","Type":"ContainerDied","Data":"04c536f0c0a4de0f75b68cf20b7d276882d7367a0a0f256bf637f5c5dd0ed7cc"} Feb 01 08:11:10 crc kubenswrapper[4650]: I0201 08:11:10.691422 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8xs4" event={"ID":"f4ea2a73-8081-43e2-b8d4-9913f624e2cb","Type":"ContainerStarted","Data":"60b5dad82f28ae1b895684c43270af4047851f58b7ab2b79d517608c13a3ecb5"} Feb 01 08:11:10 crc kubenswrapper[4650]: I0201 08:11:10.712606 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-h8xs4" podStartSLOduration=3.15095147 podStartE2EDuration="15.712586578s" podCreationTimestamp="2026-02-01 08:10:55 +0000 UTC" firstStartedPulling="2026-02-01 08:10:57.590958169 +0000 UTC m=+2856.314056414" lastFinishedPulling="2026-02-01 08:11:10.152593277 +0000 UTC m=+2868.875691522" observedRunningTime="2026-02-01 08:11:10.709321592 +0000 UTC m=+2869.432419837" watchObservedRunningTime="2026-02-01 08:11:10.712586578 +0000 UTC m=+2869.435684823" Feb 01 08:11:15 crc kubenswrapper[4650]: E0201 08:11:15.610288 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 08:11:15 crc kubenswrapper[4650]: I0201 08:11:15.724828 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:11:15 crc kubenswrapper[4650]: I0201 08:11:15.965531 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:11:15 crc kubenswrapper[4650]: I0201 08:11:15.965796 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:11:15 crc kubenswrapper[4650]: E0201 08:11:15.966094 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:11:15 crc kubenswrapper[4650]: I0201 08:11:15.984702 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:11:15 crc kubenswrapper[4650]: I0201 08:11:15.985297 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:11:16 crc kubenswrapper[4650]: I0201 08:11:16.734373 4650 generic.go:334] "Generic (PLEG): container finished" podID="de30a1c5-facc-4c98-99dc-88b0f7cee234" containerID="1d6248bdf05756a498df5887054790196f1a0e0eb8af2c4bc452cda0fe6188b8" exitCode=0 Feb 01 08:11:16 crc kubenswrapper[4650]: I0201 08:11:16.734458 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzzm4/crc-debug-tshnl" event={"ID":"de30a1c5-facc-4c98-99dc-88b0f7cee234","Type":"ContainerDied","Data":"1d6248bdf05756a498df5887054790196f1a0e0eb8af2c4bc452cda0fe6188b8"} Feb 01 08:11:17 crc kubenswrapper[4650]: I0201 08:11:17.050663 4650 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h8xs4" podUID="f4ea2a73-8081-43e2-b8d4-9913f624e2cb" containerName="registry-server" probeResult="failure" output=< Feb 01 08:11:17 crc kubenswrapper[4650]: timeout: failed to connect service ":50051" within 1s Feb 01 08:11:17 crc kubenswrapper[4650]: > Feb 01 08:11:17 crc kubenswrapper[4650]: I0201 08:11:17.837884 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bzzm4/crc-debug-tshnl" Feb 01 08:11:17 crc kubenswrapper[4650]: I0201 08:11:17.870539 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-bzzm4/crc-debug-tshnl"] Feb 01 08:11:17 crc kubenswrapper[4650]: I0201 08:11:17.877325 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-bzzm4/crc-debug-tshnl"] Feb 01 08:11:17 crc kubenswrapper[4650]: I0201 08:11:17.983463 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkm2p\" (UniqueName: \"kubernetes.io/projected/de30a1c5-facc-4c98-99dc-88b0f7cee234-kube-api-access-lkm2p\") pod \"de30a1c5-facc-4c98-99dc-88b0f7cee234\" (UID: \"de30a1c5-facc-4c98-99dc-88b0f7cee234\") " Feb 01 08:11:17 crc kubenswrapper[4650]: I0201 08:11:17.983740 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/de30a1c5-facc-4c98-99dc-88b0f7cee234-host\") pod \"de30a1c5-facc-4c98-99dc-88b0f7cee234\" (UID: \"de30a1c5-facc-4c98-99dc-88b0f7cee234\") " Feb 01 08:11:17 crc kubenswrapper[4650]: I0201 08:11:17.983909 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de30a1c5-facc-4c98-99dc-88b0f7cee234-host" (OuterVolumeSpecName: "host") pod "de30a1c5-facc-4c98-99dc-88b0f7cee234" (UID: "de30a1c5-facc-4c98-99dc-88b0f7cee234"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 08:11:17 crc kubenswrapper[4650]: I0201 08:11:17.984303 4650 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/de30a1c5-facc-4c98-99dc-88b0f7cee234-host\") on node \"crc\" DevicePath \"\"" Feb 01 08:11:17 crc kubenswrapper[4650]: I0201 08:11:17.999072 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de30a1c5-facc-4c98-99dc-88b0f7cee234-kube-api-access-lkm2p" (OuterVolumeSpecName: "kube-api-access-lkm2p") pod "de30a1c5-facc-4c98-99dc-88b0f7cee234" (UID: "de30a1c5-facc-4c98-99dc-88b0f7cee234"). InnerVolumeSpecName "kube-api-access-lkm2p". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:11:18 crc kubenswrapper[4650]: I0201 08:11:18.086188 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkm2p\" (UniqueName: \"kubernetes.io/projected/de30a1c5-facc-4c98-99dc-88b0f7cee234-kube-api-access-lkm2p\") on node \"crc\" DevicePath \"\"" Feb 01 08:11:18 crc kubenswrapper[4650]: I0201 08:11:18.750866 4650 scope.go:117] "RemoveContainer" containerID="1d6248bdf05756a498df5887054790196f1a0e0eb8af2c4bc452cda0fe6188b8" Feb 01 08:11:18 crc kubenswrapper[4650]: I0201 08:11:18.750912 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bzzm4/crc-debug-tshnl" Feb 01 08:11:18 crc kubenswrapper[4650]: I0201 08:11:18.966139 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:11:18 crc kubenswrapper[4650]: I0201 08:11:18.966201 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:11:18 crc kubenswrapper[4650]: I0201 08:11:18.966222 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:11:18 crc kubenswrapper[4650]: I0201 08:11:18.966279 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:11:18 crc kubenswrapper[4650]: E0201 08:11:18.966813 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.099692 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-bzzm4/crc-debug-vb2tb"] Feb 01 08:11:19 crc kubenswrapper[4650]: E0201 08:11:19.100214 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de30a1c5-facc-4c98-99dc-88b0f7cee234" containerName="container-00" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.100235 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="de30a1c5-facc-4c98-99dc-88b0f7cee234" containerName="container-00" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.100441 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="de30a1c5-facc-4c98-99dc-88b0f7cee234" containerName="container-00" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.101184 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.106589 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-bzzm4"/"default-dockercfg-jk8sw" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.209169 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/20ac6da2-62fc-43ab-86f0-b5d19f3f5178-host\") pod \"crc-debug-vb2tb\" (UID: \"20ac6da2-62fc-43ab-86f0-b5d19f3f5178\") " pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.209620 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bwf9\" (UniqueName: \"kubernetes.io/projected/20ac6da2-62fc-43ab-86f0-b5d19f3f5178-kube-api-access-7bwf9\") pod \"crc-debug-vb2tb\" (UID: \"20ac6da2-62fc-43ab-86f0-b5d19f3f5178\") " pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.312132 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bwf9\" (UniqueName: \"kubernetes.io/projected/20ac6da2-62fc-43ab-86f0-b5d19f3f5178-kube-api-access-7bwf9\") pod \"crc-debug-vb2tb\" (UID: \"20ac6da2-62fc-43ab-86f0-b5d19f3f5178\") " pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.312198 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/20ac6da2-62fc-43ab-86f0-b5d19f3f5178-host\") pod \"crc-debug-vb2tb\" (UID: \"20ac6da2-62fc-43ab-86f0-b5d19f3f5178\") " pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.312327 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/20ac6da2-62fc-43ab-86f0-b5d19f3f5178-host\") pod \"crc-debug-vb2tb\" (UID: \"20ac6da2-62fc-43ab-86f0-b5d19f3f5178\") " pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.344993 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bwf9\" (UniqueName: \"kubernetes.io/projected/20ac6da2-62fc-43ab-86f0-b5d19f3f5178-kube-api-access-7bwf9\") pod \"crc-debug-vb2tb\" (UID: \"20ac6da2-62fc-43ab-86f0-b5d19f3f5178\") " pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.418498 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.760157 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" event={"ID":"20ac6da2-62fc-43ab-86f0-b5d19f3f5178","Type":"ContainerStarted","Data":"e11d9f1697428d09e5645532e9977df506b860e7c0d947e2d81ef0d1bee52bb6"} Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.760235 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" event={"ID":"20ac6da2-62fc-43ab-86f0-b5d19f3f5178","Type":"ContainerStarted","Data":"d9b94fb599c5da9da6c8baf2586f8172914cecfb2fda754c5994aa8228b1a8a4"} Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.776936 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" podStartSLOduration=0.776906524 podStartE2EDuration="776.906524ms" podCreationTimestamp="2026-02-01 08:11:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 08:11:19.773075703 +0000 UTC m=+2878.496173958" watchObservedRunningTime="2026-02-01 08:11:19.776906524 +0000 UTC m=+2878.500004809" Feb 01 08:11:19 crc kubenswrapper[4650]: I0201 08:11:19.997363 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de30a1c5-facc-4c98-99dc-88b0f7cee234" path="/var/lib/kubelet/pods/de30a1c5-facc-4c98-99dc-88b0f7cee234/volumes" Feb 01 08:11:20 crc kubenswrapper[4650]: I0201 08:11:20.774257 4650 generic.go:334] "Generic (PLEG): container finished" podID="20ac6da2-62fc-43ab-86f0-b5d19f3f5178" containerID="e11d9f1697428d09e5645532e9977df506b860e7c0d947e2d81ef0d1bee52bb6" exitCode=1 Feb 01 08:11:20 crc kubenswrapper[4650]: I0201 08:11:20.774309 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" event={"ID":"20ac6da2-62fc-43ab-86f0-b5d19f3f5178","Type":"ContainerDied","Data":"e11d9f1697428d09e5645532e9977df506b860e7c0d947e2d81ef0d1bee52bb6"} Feb 01 08:11:21 crc kubenswrapper[4650]: I0201 08:11:21.867500 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" Feb 01 08:11:21 crc kubenswrapper[4650]: I0201 08:11:21.900884 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-bzzm4/crc-debug-vb2tb"] Feb 01 08:11:21 crc kubenswrapper[4650]: I0201 08:11:21.910609 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-bzzm4/crc-debug-vb2tb"] Feb 01 08:11:21 crc kubenswrapper[4650]: I0201 08:11:21.922044 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/20ac6da2-62fc-43ab-86f0-b5d19f3f5178-host\") pod \"20ac6da2-62fc-43ab-86f0-b5d19f3f5178\" (UID: \"20ac6da2-62fc-43ab-86f0-b5d19f3f5178\") " Feb 01 08:11:21 crc kubenswrapper[4650]: I0201 08:11:21.922130 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bwf9\" (UniqueName: \"kubernetes.io/projected/20ac6da2-62fc-43ab-86f0-b5d19f3f5178-kube-api-access-7bwf9\") pod \"20ac6da2-62fc-43ab-86f0-b5d19f3f5178\" (UID: \"20ac6da2-62fc-43ab-86f0-b5d19f3f5178\") " Feb 01 08:11:21 crc kubenswrapper[4650]: I0201 08:11:21.922194 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/20ac6da2-62fc-43ab-86f0-b5d19f3f5178-host" (OuterVolumeSpecName: "host") pod "20ac6da2-62fc-43ab-86f0-b5d19f3f5178" (UID: "20ac6da2-62fc-43ab-86f0-b5d19f3f5178"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 01 08:11:21 crc kubenswrapper[4650]: I0201 08:11:21.922494 4650 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/20ac6da2-62fc-43ab-86f0-b5d19f3f5178-host\") on node \"crc\" DevicePath \"\"" Feb 01 08:11:21 crc kubenswrapper[4650]: I0201 08:11:21.933197 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20ac6da2-62fc-43ab-86f0-b5d19f3f5178-kube-api-access-7bwf9" (OuterVolumeSpecName: "kube-api-access-7bwf9") pod "20ac6da2-62fc-43ab-86f0-b5d19f3f5178" (UID: "20ac6da2-62fc-43ab-86f0-b5d19f3f5178"). InnerVolumeSpecName "kube-api-access-7bwf9". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:11:21 crc kubenswrapper[4650]: I0201 08:11:21.978890 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20ac6da2-62fc-43ab-86f0-b5d19f3f5178" path="/var/lib/kubelet/pods/20ac6da2-62fc-43ab-86f0-b5d19f3f5178/volumes" Feb 01 08:11:22 crc kubenswrapper[4650]: I0201 08:11:22.023749 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bwf9\" (UniqueName: \"kubernetes.io/projected/20ac6da2-62fc-43ab-86f0-b5d19f3f5178-kube-api-access-7bwf9\") on node \"crc\" DevicePath \"\"" Feb 01 08:11:22 crc kubenswrapper[4650]: I0201 08:11:22.789262 4650 scope.go:117] "RemoveContainer" containerID="e11d9f1697428d09e5645532e9977df506b860e7c0d947e2d81ef0d1bee52bb6" Feb 01 08:11:22 crc kubenswrapper[4650]: I0201 08:11:22.789285 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bzzm4/crc-debug-vb2tb" Feb 01 08:11:26 crc kubenswrapper[4650]: I0201 08:11:26.033959 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:11:26 crc kubenswrapper[4650]: I0201 08:11:26.089714 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:11:26 crc kubenswrapper[4650]: I0201 08:11:26.868088 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h8xs4"] Feb 01 08:11:26 crc kubenswrapper[4650]: I0201 08:11:26.965095 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:11:26 crc kubenswrapper[4650]: I0201 08:11:26.965125 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:11:26 crc kubenswrapper[4650]: E0201 08:11:26.965358 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:11:27 crc kubenswrapper[4650]: I0201 08:11:27.831552 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h8xs4" podUID="f4ea2a73-8081-43e2-b8d4-9913f624e2cb" containerName="registry-server" containerID="cri-o://60b5dad82f28ae1b895684c43270af4047851f58b7ab2b79d517608c13a3ecb5" gracePeriod=2 Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.334566 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.440254 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-utilities\") pod \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\" (UID: \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\") " Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.440297 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-catalog-content\") pod \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\" (UID: \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\") " Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.440429 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7fgj\" (UniqueName: \"kubernetes.io/projected/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-kube-api-access-h7fgj\") pod \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\" (UID: \"f4ea2a73-8081-43e2-b8d4-9913f624e2cb\") " Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.442399 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-utilities" (OuterVolumeSpecName: "utilities") pod "f4ea2a73-8081-43e2-b8d4-9913f624e2cb" (UID: "f4ea2a73-8081-43e2-b8d4-9913f624e2cb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.451273 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-kube-api-access-h7fgj" (OuterVolumeSpecName: "kube-api-access-h7fgj") pod "f4ea2a73-8081-43e2-b8d4-9913f624e2cb" (UID: "f4ea2a73-8081-43e2-b8d4-9913f624e2cb"). InnerVolumeSpecName "kube-api-access-h7fgj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.542869 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7fgj\" (UniqueName: \"kubernetes.io/projected/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-kube-api-access-h7fgj\") on node \"crc\" DevicePath \"\"" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.542922 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.565086 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f4ea2a73-8081-43e2-b8d4-9913f624e2cb" (UID: "f4ea2a73-8081-43e2-b8d4-9913f624e2cb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.644897 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4ea2a73-8081-43e2-b8d4-9913f624e2cb-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.844220 4650 generic.go:334] "Generic (PLEG): container finished" podID="f4ea2a73-8081-43e2-b8d4-9913f624e2cb" containerID="60b5dad82f28ae1b895684c43270af4047851f58b7ab2b79d517608c13a3ecb5" exitCode=0 Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.844315 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h8xs4" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.844357 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8xs4" event={"ID":"f4ea2a73-8081-43e2-b8d4-9913f624e2cb","Type":"ContainerDied","Data":"60b5dad82f28ae1b895684c43270af4047851f58b7ab2b79d517608c13a3ecb5"} Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.845558 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8xs4" event={"ID":"f4ea2a73-8081-43e2-b8d4-9913f624e2cb","Type":"ContainerDied","Data":"756892cce454279ab32773ba595253b1ee83dc602f62eecc994e77b126718ecb"} Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.845611 4650 scope.go:117] "RemoveContainer" containerID="60b5dad82f28ae1b895684c43270af4047851f58b7ab2b79d517608c13a3ecb5" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.868317 4650 scope.go:117] "RemoveContainer" containerID="04c536f0c0a4de0f75b68cf20b7d276882d7367a0a0f256bf637f5c5dd0ed7cc" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.907414 4650 scope.go:117] "RemoveContainer" containerID="e2439f34aa2451eaf15610b1ac9555294f4c72067fed2413ab5bd4afa9281cc6" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.910004 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h8xs4"] Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.923681 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-h8xs4"] Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.982508 4650 scope.go:117] "RemoveContainer" containerID="60b5dad82f28ae1b895684c43270af4047851f58b7ab2b79d517608c13a3ecb5" Feb 01 08:11:28 crc kubenswrapper[4650]: E0201 08:11:28.983075 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60b5dad82f28ae1b895684c43270af4047851f58b7ab2b79d517608c13a3ecb5\": container with ID starting with 60b5dad82f28ae1b895684c43270af4047851f58b7ab2b79d517608c13a3ecb5 not found: ID does not exist" containerID="60b5dad82f28ae1b895684c43270af4047851f58b7ab2b79d517608c13a3ecb5" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.983134 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60b5dad82f28ae1b895684c43270af4047851f58b7ab2b79d517608c13a3ecb5"} err="failed to get container status \"60b5dad82f28ae1b895684c43270af4047851f58b7ab2b79d517608c13a3ecb5\": rpc error: code = NotFound desc = could not find container \"60b5dad82f28ae1b895684c43270af4047851f58b7ab2b79d517608c13a3ecb5\": container with ID starting with 60b5dad82f28ae1b895684c43270af4047851f58b7ab2b79d517608c13a3ecb5 not found: ID does not exist" Feb 01 08:11:28 crc 
kubenswrapper[4650]: I0201 08:11:28.983177 4650 scope.go:117] "RemoveContainer" containerID="04c536f0c0a4de0f75b68cf20b7d276882d7367a0a0f256bf637f5c5dd0ed7cc" Feb 01 08:11:28 crc kubenswrapper[4650]: E0201 08:11:28.983951 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04c536f0c0a4de0f75b68cf20b7d276882d7367a0a0f256bf637f5c5dd0ed7cc\": container with ID starting with 04c536f0c0a4de0f75b68cf20b7d276882d7367a0a0f256bf637f5c5dd0ed7cc not found: ID does not exist" containerID="04c536f0c0a4de0f75b68cf20b7d276882d7367a0a0f256bf637f5c5dd0ed7cc" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.983989 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04c536f0c0a4de0f75b68cf20b7d276882d7367a0a0f256bf637f5c5dd0ed7cc"} err="failed to get container status \"04c536f0c0a4de0f75b68cf20b7d276882d7367a0a0f256bf637f5c5dd0ed7cc\": rpc error: code = NotFound desc = could not find container \"04c536f0c0a4de0f75b68cf20b7d276882d7367a0a0f256bf637f5c5dd0ed7cc\": container with ID starting with 04c536f0c0a4de0f75b68cf20b7d276882d7367a0a0f256bf637f5c5dd0ed7cc not found: ID does not exist" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.984018 4650 scope.go:117] "RemoveContainer" containerID="e2439f34aa2451eaf15610b1ac9555294f4c72067fed2413ab5bd4afa9281cc6" Feb 01 08:11:28 crc kubenswrapper[4650]: E0201 08:11:28.984433 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2439f34aa2451eaf15610b1ac9555294f4c72067fed2413ab5bd4afa9281cc6\": container with ID starting with e2439f34aa2451eaf15610b1ac9555294f4c72067fed2413ab5bd4afa9281cc6 not found: ID does not exist" containerID="e2439f34aa2451eaf15610b1ac9555294f4c72067fed2413ab5bd4afa9281cc6" Feb 01 08:11:28 crc kubenswrapper[4650]: I0201 08:11:28.984483 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2439f34aa2451eaf15610b1ac9555294f4c72067fed2413ab5bd4afa9281cc6"} err="failed to get container status \"e2439f34aa2451eaf15610b1ac9555294f4c72067fed2413ab5bd4afa9281cc6\": rpc error: code = NotFound desc = could not find container \"e2439f34aa2451eaf15610b1ac9555294f4c72067fed2413ab5bd4afa9281cc6\": container with ID starting with e2439f34aa2451eaf15610b1ac9555294f4c72067fed2413ab5bd4afa9281cc6 not found: ID does not exist" Feb 01 08:11:30 crc kubenswrapper[4650]: I0201 08:11:29.979191 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4ea2a73-8081-43e2-b8d4-9913f624e2cb" path="/var/lib/kubelet/pods/f4ea2a73-8081-43e2-b8d4-9913f624e2cb/volumes" Feb 01 08:11:32 crc kubenswrapper[4650]: I0201 08:11:32.964971 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:11:32 crc kubenswrapper[4650]: I0201 08:11:32.965637 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:11:32 crc kubenswrapper[4650]: I0201 08:11:32.965670 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:11:32 crc kubenswrapper[4650]: I0201 08:11:32.965753 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:11:32 crc kubenswrapper[4650]: E0201 08:11:32.966186 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to 
\"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:11:37 crc kubenswrapper[4650]: I0201 08:11:37.161616 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 08:11:37 crc kubenswrapper[4650]: I0201 08:11:37.162287 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 08:11:37 crc kubenswrapper[4650]: I0201 08:11:37.162352 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" Feb 01 08:11:37 crc kubenswrapper[4650]: I0201 08:11:37.163334 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5"} pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 01 08:11:37 crc kubenswrapper[4650]: I0201 08:11:37.163432 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" containerID="cri-o://8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" gracePeriod=600 Feb 01 08:11:37 crc kubenswrapper[4650]: E0201 08:11:37.291746 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:11:37 crc kubenswrapper[4650]: I0201 08:11:37.940735 4650 generic.go:334] "Generic (PLEG): container finished" podID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" exitCode=0 Feb 01 08:11:37 crc kubenswrapper[4650]: I0201 08:11:37.940772 4650 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerDied","Data":"8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5"} Feb 01 08:11:37 crc kubenswrapper[4650]: I0201 08:11:37.941588 4650 scope.go:117] "RemoveContainer" containerID="602c56a59dd65049f4631ec9296207f999808cfcf7cd5f0739471580c8fa001e" Feb 01 08:11:37 crc kubenswrapper[4650]: I0201 08:11:37.942161 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:11:37 crc kubenswrapper[4650]: E0201 08:11:37.942517 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:11:38 crc kubenswrapper[4650]: I0201 08:11:38.965113 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:11:38 crc kubenswrapper[4650]: I0201 08:11:38.965145 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:11:39 crc kubenswrapper[4650]: E0201 08:11:39.169254 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:11:39 crc kubenswrapper[4650]: I0201 08:11:39.974725 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"b9bd3c3e2f4658605901e21b729ee379b472bad7de5a122aef3d8bb087d59360"} Feb 01 08:11:39 crc kubenswrapper[4650]: I0201 08:11:39.974916 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:11:39 crc kubenswrapper[4650]: I0201 08:11:39.975336 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:11:39 crc kubenswrapper[4650]: E0201 08:11:39.975525 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:11:40 crc kubenswrapper[4650]: I0201 08:11:40.974932 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:11:40 crc kubenswrapper[4650]: E0201 08:11:40.975423 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" 
podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:11:43 crc kubenswrapper[4650]: I0201 08:11:43.966164 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:11:43 crc kubenswrapper[4650]: I0201 08:11:43.966535 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:11:43 crc kubenswrapper[4650]: I0201 08:11:43.966558 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:11:43 crc kubenswrapper[4650]: I0201 08:11:43.966614 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:11:43 crc kubenswrapper[4650]: E0201 08:11:43.966939 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:11:44 crc kubenswrapper[4650]: I0201 08:11:44.808628 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:11:45 crc kubenswrapper[4650]: I0201 08:11:45.807096 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:11:48 crc kubenswrapper[4650]: I0201 08:11:48.810964 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:11:48 crc kubenswrapper[4650]: I0201 08:11:48.966105 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:11:48 crc kubenswrapper[4650]: E0201 08:11:48.966529 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:11:49 crc kubenswrapper[4650]: I0201 08:11:49.807359 4650 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:11:51 crc kubenswrapper[4650]: I0201 08:11:51.808660 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:11:51 crc kubenswrapper[4650]: I0201 08:11:51.810333 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:11:51 crc kubenswrapper[4650]: I0201 08:11:51.812192 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"b9bd3c3e2f4658605901e21b729ee379b472bad7de5a122aef3d8bb087d59360"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 08:11:51 crc kubenswrapper[4650]: I0201 08:11:51.812391 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:11:51 crc kubenswrapper[4650]: I0201 08:11:51.812554 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://b9bd3c3e2f4658605901e21b729ee379b472bad7de5a122aef3d8bb087d59360" gracePeriod=30 Feb 01 08:11:51 crc kubenswrapper[4650]: I0201 08:11:51.829441 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.173:8080/healthcheck\": read tcp 10.217.0.2:32836->10.217.0.173:8080: read: connection reset by peer" Feb 01 08:11:52 crc kubenswrapper[4650]: I0201 08:11:52.067683 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="b9bd3c3e2f4658605901e21b729ee379b472bad7de5a122aef3d8bb087d59360" exitCode=0 Feb 01 08:11:52 crc kubenswrapper[4650]: I0201 08:11:52.067813 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"b9bd3c3e2f4658605901e21b729ee379b472bad7de5a122aef3d8bb087d59360"} Feb 01 08:11:52 crc kubenswrapper[4650]: I0201 08:11:52.068525 4650 scope.go:117] "RemoveContainer" containerID="161e396f63d9ac05cc9251d67fa19c9ed44244e35442438249818be84f3c36c7" Feb 01 08:11:52 crc kubenswrapper[4650]: E0201 08:11:52.249392 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:11:53 crc kubenswrapper[4650]: I0201 08:11:53.076906 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c"} Feb 01 08:11:53 crc kubenswrapper[4650]: I0201 
08:11:53.077540 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:11:53 crc kubenswrapper[4650]: E0201 08:11:53.077720 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:11:53 crc kubenswrapper[4650]: I0201 08:11:53.078305 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:11:53 crc kubenswrapper[4650]: I0201 08:11:53.090534 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" exitCode=1 Feb 01 08:11:53 crc kubenswrapper[4650]: I0201 08:11:53.090572 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1"} Feb 01 08:11:53 crc kubenswrapper[4650]: I0201 08:11:53.090601 4650 scope.go:117] "RemoveContainer" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:11:53 crc kubenswrapper[4650]: I0201 08:11:53.091256 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:11:53 crc kubenswrapper[4650]: I0201 08:11:53.091497 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:11:53 crc kubenswrapper[4650]: I0201 08:11:53.091533 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:11:53 crc kubenswrapper[4650]: I0201 08:11:53.091577 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:11:53 crc kubenswrapper[4650]: I0201 08:11:53.091597 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:11:53 crc kubenswrapper[4650]: E0201 08:11:53.091907 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" 
pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:11:53 crc kubenswrapper[4650]: I0201 08:11:53.894904 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7c95c4fc5d-sj2l8_1757f711-f748-4782-8075-cc9ae3b3275c/barbican-api/0.log" Feb 01 08:11:54 crc kubenswrapper[4650]: I0201 08:11:54.105695 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:11:54 crc kubenswrapper[4650]: E0201 08:11:54.105937 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:11:54 crc kubenswrapper[4650]: I0201 08:11:54.147365 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7c95c4fc5d-sj2l8_1757f711-f748-4782-8075-cc9ae3b3275c/barbican-api-log/0.log" Feb 01 08:11:54 crc kubenswrapper[4650]: I0201 08:11:54.232506 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-595fb9c59d-bnj8v_4ac48219-ace9-4dec-a04a-c710e730a1d4/barbican-keystone-listener/0.log" Feb 01 08:11:54 crc kubenswrapper[4650]: I0201 08:11:54.359947 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-595fb9c59d-bnj8v_4ac48219-ace9-4dec-a04a-c710e730a1d4/barbican-keystone-listener-log/0.log" Feb 01 08:11:54 crc kubenswrapper[4650]: I0201 08:11:54.439994 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-8466dd5d47-jv8ww_8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa/barbican-worker/0.log" Feb 01 08:11:54 crc kubenswrapper[4650]: I0201 08:11:54.523333 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-8466dd5d47-jv8ww_8d2d19e9-bf5b-4f95-beb0-415a3bf6e4aa/barbican-worker-log/0.log" Feb 01 08:11:54 crc kubenswrapper[4650]: I0201 08:11:54.692261 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_9d9859b9-fb15-4385-bcc3-b5d6044750dd/ceilometer-central-agent/0.log" Feb 01 08:11:54 crc kubenswrapper[4650]: I0201 08:11:54.735684 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_9d9859b9-fb15-4385-bcc3-b5d6044750dd/proxy-httpd/0.log" Feb 01 08:11:54 crc kubenswrapper[4650]: I0201 08:11:54.746618 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_9d9859b9-fb15-4385-bcc3-b5d6044750dd/ceilometer-notification-agent/0.log" Feb 01 08:11:54 crc kubenswrapper[4650]: I0201 08:11:54.855175 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_9d9859b9-fb15-4385-bcc3-b5d6044750dd/sg-core/0.log" Feb 01 08:11:54 crc kubenswrapper[4650]: I0201 08:11:54.926767 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_dd2aba78-ffb9-4c24-bd46-0ecc5c93217e/cinder-api/0.log" Feb 01 08:11:54 crc kubenswrapper[4650]: I0201 08:11:54.972333 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_dd2aba78-ffb9-4c24-bd46-0ecc5c93217e/cinder-api-log/0.log" Feb 01 08:11:55 crc kubenswrapper[4650]: I0201 08:11:55.112770 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" 
Feb 01 08:11:55 crc kubenswrapper[4650]: E0201 08:11:55.113189 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:11:55 crc kubenswrapper[4650]: I0201 08:11:55.141099 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_198da077-645a-4341-9c07-6860b5ce0a0d/cinder-scheduler/0.log" Feb 01 08:11:55 crc kubenswrapper[4650]: I0201 08:11:55.179279 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_198da077-645a-4341-9c07-6860b5ce0a0d/probe/0.log" Feb 01 08:11:55 crc kubenswrapper[4650]: I0201 08:11:55.356223 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5459cb87c-blvph_a23bf06f-77cb-493c-8d42-f75156a56918/init/0.log" Feb 01 08:11:55 crc kubenswrapper[4650]: I0201 08:11:55.570988 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5459cb87c-blvph_a23bf06f-77cb-493c-8d42-f75156a56918/dnsmasq-dns/0.log" Feb 01 08:11:55 crc kubenswrapper[4650]: I0201 08:11:55.615109 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_34f640f2-11f5-429e-a5e2-41cffad03e78/glance-httpd/0.log" Feb 01 08:11:55 crc kubenswrapper[4650]: I0201 08:11:55.630789 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5459cb87c-blvph_a23bf06f-77cb-493c-8d42-f75156a56918/init/0.log" Feb 01 08:11:55 crc kubenswrapper[4650]: I0201 08:11:55.764855 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_34f640f2-11f5-429e-a5e2-41cffad03e78/glance-log/0.log" Feb 01 08:11:55 crc kubenswrapper[4650]: I0201 08:11:55.869085 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_33c7507f-4100-4022-9c51-482d09197d58/glance-httpd/0.log" Feb 01 08:11:55 crc kubenswrapper[4650]: I0201 08:11:55.919942 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_33c7507f-4100-4022-9c51-482d09197d58/glance-log/0.log" Feb 01 08:11:56 crc kubenswrapper[4650]: I0201 08:11:56.162893 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-79fd8b5f84-qg9cv_9c4bad14-279f-4212-a86d-cea1c9fe7b48/horizon/1.log" Feb 01 08:11:56 crc kubenswrapper[4650]: I0201 08:11:56.219004 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-79fd8b5f84-qg9cv_9c4bad14-279f-4212-a86d-cea1c9fe7b48/horizon/0.log" Feb 01 08:11:56 crc kubenswrapper[4650]: I0201 08:11:56.378878 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-79fd8b5f84-qg9cv_9c4bad14-279f-4212-a86d-cea1c9fe7b48/horizon-log/0.log" Feb 01 08:11:56 crc kubenswrapper[4650]: I0201 08:11:56.510145 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-b4f94b5c6-zjcnl_b99a5b57-fa0e-464e-a115-4afe6f30f193/keystone-api/0.log" Feb 01 08:11:56 crc kubenswrapper[4650]: I0201 08:11:56.562601 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29498881-4thgd_44e12371-b3b4-4575-8595-6f212ce4cb89/keystone-cron/0.log" Feb 01 08:11:56 crc kubenswrapper[4650]: I0201 08:11:56.735284 4650 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_1c77c7ff-72e7-4635-b2b8-2e523265c4ff/kube-state-metrics/0.log" Feb 01 08:11:57 crc kubenswrapper[4650]: I0201 08:11:57.117822 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-d5c446bd9-7rlx2_810a735e-844d-434e-aa3f-7ac5421d1303/neutron-api/0.log" Feb 01 08:11:57 crc kubenswrapper[4650]: I0201 08:11:57.206394 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-d5c446bd9-7rlx2_810a735e-844d-434e-aa3f-7ac5421d1303/neutron-httpd/0.log" Feb 01 08:11:57 crc kubenswrapper[4650]: I0201 08:11:57.642857 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_b16176d2-df87-458e-80c6-44d95cc29889/nova-api-log/0.log" Feb 01 08:11:57 crc kubenswrapper[4650]: I0201 08:11:57.648845 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_b16176d2-df87-458e-80c6-44d95cc29889/nova-api-api/0.log" Feb 01 08:11:57 crc kubenswrapper[4650]: I0201 08:11:57.812357 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:11:58 crc kubenswrapper[4650]: I0201 08:11:58.048730 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_c9fc1633-ee89-4602-b737-a3644616841b/nova-cell0-conductor-conductor/0.log" Feb 01 08:11:58 crc kubenswrapper[4650]: I0201 08:11:58.216338 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_74d70e84-b5e5-4146-a88f-23b2ece2f6f4/nova-cell1-conductor-conductor/0.log" Feb 01 08:11:58 crc kubenswrapper[4650]: I0201 08:11:58.423727 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_026358a4-5ccb-421a-b878-c0022296eaa1/nova-cell1-novncproxy-novncproxy/0.log" Feb 01 08:11:58 crc kubenswrapper[4650]: I0201 08:11:58.591051 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_9f8e0964-a78f-45f4-a5b0-8ecfaa391176/nova-metadata-log/0.log" Feb 01 08:11:59 crc kubenswrapper[4650]: I0201 08:11:59.025131 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_ac80d66e-9ac2-443c-a731-1dfbbe67e6d0/nova-scheduler-scheduler/0.log" Feb 01 08:11:59 crc kubenswrapper[4650]: I0201 08:11:59.064327 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac/mysql-bootstrap/0.log" Feb 01 08:11:59 crc kubenswrapper[4650]: I0201 08:11:59.250961 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac/galera/0.log" Feb 01 08:11:59 crc kubenswrapper[4650]: I0201 08:11:59.343343 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a6c9db6b-e8e5-4033-9abd-e4c04ba5c0ac/mysql-bootstrap/0.log" Feb 01 08:11:59 crc kubenswrapper[4650]: I0201 08:11:59.534160 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ce8e76c5-52b4-46aa-b009-181f08e5cdc7/mysql-bootstrap/0.log" Feb 01 08:11:59 crc kubenswrapper[4650]: I0201 08:11:59.705960 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_9f8e0964-a78f-45f4-a5b0-8ecfaa391176/nova-metadata-metadata/0.log" Feb 01 
08:11:59 crc kubenswrapper[4650]: I0201 08:11:59.758011 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ce8e76c5-52b4-46aa-b009-181f08e5cdc7/mysql-bootstrap/0.log" Feb 01 08:11:59 crc kubenswrapper[4650]: I0201 08:11:59.780287 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ce8e76c5-52b4-46aa-b009-181f08e5cdc7/galera/0.log" Feb 01 08:11:59 crc kubenswrapper[4650]: I0201 08:11:59.807350 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:11:59 crc kubenswrapper[4650]: I0201 08:11:59.978184 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_eb39e44a-8146-4d73-bee6-6f5a65ccd5e4/openstackclient/0.log" Feb 01 08:12:00 crc kubenswrapper[4650]: I0201 08:12:00.048138 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-df4vg_0eea2c6a-8650-4a55-aab9-0b27b8e829b4/ovn-controller/0.log" Feb 01 08:12:00 crc kubenswrapper[4650]: I0201 08:12:00.233166 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-9qx7d_e49b7206-629f-498a-b30e-e73c08c0bacf/openstack-network-exporter/0.log" Feb 01 08:12:00 crc kubenswrapper[4650]: I0201 08:12:00.428068 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-9xcg8_0a714fe2-3b81-4e99-8596-1b7ccd8d913c/ovsdb-server-init/0.log" Feb 01 08:12:00 crc kubenswrapper[4650]: I0201 08:12:00.744050 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-9xcg8_0a714fe2-3b81-4e99-8596-1b7ccd8d913c/ovs-vswitchd/0.log" Feb 01 08:12:00 crc kubenswrapper[4650]: I0201 08:12:00.746528 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-9xcg8_0a714fe2-3b81-4e99-8596-1b7ccd8d913c/ovsdb-server-init/0.log" Feb 01 08:12:00 crc kubenswrapper[4650]: I0201 08:12:00.763863 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-9xcg8_0a714fe2-3b81-4e99-8596-1b7ccd8d913c/ovsdb-server/0.log" Feb 01 08:12:00 crc kubenswrapper[4650]: I0201 08:12:00.808016 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:12:01 crc kubenswrapper[4650]: I0201 08:12:01.171923 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f5748bca-cf73-483d-a5ca-86e592adbc18/ovn-northd/0.log" Feb 01 08:12:01 crc kubenswrapper[4650]: I0201 08:12:01.213738 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_d74f3701-5d40-45dd-8a2d-041cdb5a8720/openstack-network-exporter/0.log" Feb 01 08:12:01 crc kubenswrapper[4650]: I0201 08:12:01.240548 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f5748bca-cf73-483d-a5ca-86e592adbc18/openstack-network-exporter/0.log" Feb 01 08:12:01 crc kubenswrapper[4650]: I0201 08:12:01.439409 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_aa3b9991-cc4c-437b-aa76-a0bb01050b1d/openstack-network-exporter/0.log" Feb 01 08:12:01 crc kubenswrapper[4650]: I0201 08:12:01.542216 4650 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_d74f3701-5d40-45dd-8a2d-041cdb5a8720/ovsdbserver-nb/0.log" Feb 01 08:12:01 crc kubenswrapper[4650]: I0201 08:12:01.572995 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_aa3b9991-cc4c-437b-aa76-a0bb01050b1d/ovsdbserver-sb/0.log" Feb 01 08:12:01 crc kubenswrapper[4650]: I0201 08:12:01.845180 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-8d56c6c66-9jtxc_9d6f0da1-e84c-4917-8ec7-6fb5fb34a949/placement-api/0.log" Feb 01 08:12:01 crc kubenswrapper[4650]: I0201 08:12:01.916198 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-8d56c6c66-9jtxc_9d6f0da1-e84c-4917-8ec7-6fb5fb34a949/placement-log/0.log" Feb 01 08:12:02 crc kubenswrapper[4650]: I0201 08:12:02.019785 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_9c378d90-fab5-4d68-9aba-892645206b97/setup-container/0.log" Feb 01 08:12:02 crc kubenswrapper[4650]: I0201 08:12:02.328940 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_722b2919-c0d6-4596-82cc-5ae2b5951263/setup-container/0.log" Feb 01 08:12:02 crc kubenswrapper[4650]: I0201 08:12:02.378511 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_9c378d90-fab5-4d68-9aba-892645206b97/setup-container/0.log" Feb 01 08:12:02 crc kubenswrapper[4650]: I0201 08:12:02.468665 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_9c378d90-fab5-4d68-9aba-892645206b97/rabbitmq/0.log" Feb 01 08:12:02 crc kubenswrapper[4650]: I0201 08:12:02.683661 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_722b2919-c0d6-4596-82cc-5ae2b5951263/setup-container/0.log" Feb 01 08:12:02 crc kubenswrapper[4650]: I0201 08:12:02.741316 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_722b2919-c0d6-4596-82cc-5ae2b5951263/rabbitmq/0.log" Feb 01 08:12:02 crc kubenswrapper[4650]: I0201 08:12:02.789335 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-599d7597b9-mh6hj_39a11122-6fd9-463b-8194-c098d9e764ec/proxy-httpd/15.log" Feb 01 08:12:02 crc kubenswrapper[4650]: I0201 08:12:02.956420 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-599d7597b9-mh6hj_39a11122-6fd9-463b-8194-c098d9e764ec/proxy-httpd/14.log" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.036968 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-599d7597b9-mh6hj_39a11122-6fd9-463b-8194-c098d9e764ec/proxy-server/10.log" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.088595 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-599d7597b9-mh6hj_39a11122-6fd9-463b-8194-c098d9e764ec/proxy-server/10.log" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.332562 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/account-auditor/0.log" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.350596 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/account-reaper/0.log" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.694779 4650 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/account-replicator/10.log" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.739080 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/account-server/0.log" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.771143 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/account-replicator/10.log" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.776756 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/container-auditor/0.log" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.811326 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.811397 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.812322 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.812352 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.812377 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" gracePeriod=30 Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.835178 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.173:8080/healthcheck\": read tcp 10.217.0.2:41472->10.217.0.173:8080: read: connection reset by peer" Feb 01 08:12:03 crc kubenswrapper[4650]: I0201 08:12:03.968273 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:12:03 crc kubenswrapper[4650]: E0201 08:12:03.968810 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.015317 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/container-replicator/10.log" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.028557 4650 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/container-server/0.log" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.030214 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/container-replicator/10.log" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.090518 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/container-updater/7.log" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.193387 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" exitCode=0 Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.193431 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c"} Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.193463 4650 scope.go:117] "RemoveContainer" containerID="b9bd3c3e2f4658605901e21b729ee379b472bad7de5a122aef3d8bb087d59360" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.274054 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/container-updater/7.log" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.308769 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/object-auditor/0.log" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.367921 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/object-expirer/10.log" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.415225 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/object-expirer/10.log" Feb 01 08:12:04 crc kubenswrapper[4650]: E0201 08:12:04.469722 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.564986 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/object-server/0.log" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.591028 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/object-replicator/0.log" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.658966 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/object-updater/6.log" Feb 01 08:12:04 crc kubenswrapper[4650]: E0201 08:12:04.697007 4650 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec\": container with ID starting with 0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec not found: ID does not exist" containerID="0742e6d978def04c80fefb526db9b4b9ea80f2d6e015f72608ff2f82685e27ec" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.799823 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.173:8080/healthcheck\": dial tcp 10.217.0.173:8080: connect: connection refused" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.869616 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/rsync/0.log" Feb 01 08:12:04 crc kubenswrapper[4650]: I0201 08:12:04.879811 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_78a7b8d6-a107-4698-b85d-77d415755428/swift-recon-cron/0.log" Feb 01 08:12:05 crc kubenswrapper[4650]: I0201 08:12:05.206569 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:12:05 crc kubenswrapper[4650]: I0201 08:12:05.206599 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:12:05 crc kubenswrapper[4650]: E0201 08:12:05.206814 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:12:07 crc kubenswrapper[4650]: I0201 08:12:07.918258 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_ca494288-bfa9-474d-8805-21226c9d7cbd/memcached/0.log" Feb 01 08:12:07 crc kubenswrapper[4650]: I0201 08:12:07.966217 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:12:07 crc kubenswrapper[4650]: I0201 08:12:07.966305 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:12:07 crc kubenswrapper[4650]: I0201 08:12:07.966349 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:12:07 crc kubenswrapper[4650]: I0201 08:12:07.966402 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:12:07 crc kubenswrapper[4650]: I0201 08:12:07.966410 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:12:07 crc kubenswrapper[4650]: E0201 08:12:07.966803 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:12:16 crc kubenswrapper[4650]: I0201 08:12:16.966805 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:12:16 crc kubenswrapper[4650]: I0201 08:12:16.967769 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:12:16 crc kubenswrapper[4650]: I0201 08:12:16.967809 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:12:16 crc kubenswrapper[4650]: E0201 08:12:16.968170 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:12:16 crc kubenswrapper[4650]: E0201 08:12:16.968286 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:12:19 crc kubenswrapper[4650]: I0201 08:12:19.965585 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:12:19 crc kubenswrapper[4650]: I0201 08:12:19.966114 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:12:19 crc kubenswrapper[4650]: I0201 08:12:19.966136 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:12:19 crc kubenswrapper[4650]: I0201 08:12:19.966180 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:12:19 crc kubenswrapper[4650]: I0201 08:12:19.966186 4650 scope.go:117] "RemoveContainer" 
containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:12:20 crc kubenswrapper[4650]: I0201 08:12:20.316523 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60"} Feb 01 08:12:20 crc kubenswrapper[4650]: E0201 08:12:20.615155 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.329369 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" exitCode=1 Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.329654 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" exitCode=1 Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.329666 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" exitCode=1 Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.329503 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60"} Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.329695 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076"} Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.329706 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef"} Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.329723 4650 scope.go:117] "RemoveContainer" containerID="362126494f500f12d8ca7d5999161fe6f1b48c2860122778a3cfe23ea05a71d6" Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.330761 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.330847 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.330887 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.330946 4650 scope.go:117] "RemoveContainer" 
containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.330956 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:12:21 crc kubenswrapper[4650]: E0201 08:12:21.331458 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.442725 4650 scope.go:117] "RemoveContainer" containerID="9a0b299053a39159116bad5f49e7b8f733411d0b98c65a3d9e5512db45a8d818" Feb 01 08:12:21 crc kubenswrapper[4650]: I0201 08:12:21.512615 4650 scope.go:117] "RemoveContainer" containerID="9d1d3a097026963e24e5ee90732369701b5718704251cbe02fcab3c568b2c0d0" Feb 01 08:12:22 crc kubenswrapper[4650]: I0201 08:12:22.351410 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:12:22 crc kubenswrapper[4650]: I0201 08:12:22.351762 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:12:22 crc kubenswrapper[4650]: I0201 08:12:22.351789 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:12:22 crc kubenswrapper[4650]: I0201 08:12:22.351841 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:12:22 crc kubenswrapper[4650]: I0201 08:12:22.351850 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:12:22 crc kubenswrapper[4650]: E0201 08:12:22.352306 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to 
\"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:12:28 crc kubenswrapper[4650]: I0201 08:12:28.189168 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd_51767901-713f-439b-88a1-ec136fbf0efc/util/0.log" Feb 01 08:12:28 crc kubenswrapper[4650]: I0201 08:12:28.379332 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd_51767901-713f-439b-88a1-ec136fbf0efc/util/0.log" Feb 01 08:12:28 crc kubenswrapper[4650]: I0201 08:12:28.380735 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd_51767901-713f-439b-88a1-ec136fbf0efc/pull/0.log" Feb 01 08:12:28 crc kubenswrapper[4650]: I0201 08:12:28.430876 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd_51767901-713f-439b-88a1-ec136fbf0efc/pull/0.log" Feb 01 08:12:28 crc kubenswrapper[4650]: I0201 08:12:28.620815 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd_51767901-713f-439b-88a1-ec136fbf0efc/util/0.log" Feb 01 08:12:28 crc kubenswrapper[4650]: I0201 08:12:28.643535 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd_51767901-713f-439b-88a1-ec136fbf0efc/extract/0.log" Feb 01 08:12:28 crc kubenswrapper[4650]: I0201 08:12:28.697818 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39584f40fa417d020abcb18a0b242f1e21497e3e434589c55af23e3f0bksvbd_51767901-713f-439b-88a1-ec136fbf0efc/pull/0.log" Feb 01 08:12:28 crc kubenswrapper[4650]: I0201 08:12:28.835077 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b6c4d8c5f-c8x6z_99856058-8981-4ea6-9621-b9908bfd3bc1/manager/0.log" Feb 01 08:12:28 crc kubenswrapper[4650]: I0201 08:12:28.899647 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-8d874c8fc-9wdtd_c7a90234-9c82-425f-81e6-6fc434196e89/manager/0.log" Feb 01 08:12:28 crc kubenswrapper[4650]: I0201 08:12:28.968738 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:12:28 crc kubenswrapper[4650]: E0201 08:12:28.968974 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:12:29 crc kubenswrapper[4650]: I0201 08:12:29.046737 4650 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6d9697b7f4-2rm5k_f093c345-aa69-48e5-989c-a1ff94898684/manager/0.log" Feb 01 08:12:29 crc kubenswrapper[4650]: I0201 08:12:29.194840 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-8886f4c47-zg5gd_9f334ccd-b794-456b-97f9-4a57cc8005b3/manager/0.log" Feb 01 08:12:29 crc kubenswrapper[4650]: I0201 08:12:29.294857 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-69d6db494d-8x4cp_31eb2e76-b750-4d61-ba29-39a830fae2e1/manager/0.log" Feb 01 08:12:29 crc kubenswrapper[4650]: I0201 08:12:29.409803 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5fb775575f-w64pj_315b2715-63dd-4a0c-8fd3-4fe29f443a76/manager/0.log" Feb 01 08:12:29 crc kubenswrapper[4650]: I0201 08:12:29.680118 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-79955696d6-v2tmb_d7e8f67a-3581-4df8-8903-7a9ac417a653/manager/0.log" Feb 01 08:12:29 crc kubenswrapper[4650]: I0201 08:12:29.693970 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5f4b8bd54d-74flx_c5cb0a01-53a8-410b-bda0-75ae6f19164d/manager/0.log" Feb 01 08:12:29 crc kubenswrapper[4650]: I0201 08:12:29.926816 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-84f48565d4-nxkp4_41ada20b-8926-463a-aeda-24a59143fd11/manager/0.log" Feb 01 08:12:29 crc kubenswrapper[4650]: I0201 08:12:29.979967 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7dd968899f-z2hrc_373fce62-65bd-4986-bb76-3abd15205fe7/manager/0.log" Feb 01 08:12:30 crc kubenswrapper[4650]: I0201 08:12:30.871799 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-67bf948998-2pr9r_e65810ee-6370-4e69-9d21-b6c74af493ae/manager/0.log" Feb 01 08:12:30 crc kubenswrapper[4650]: I0201 08:12:30.965282 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:12:30 crc kubenswrapper[4650]: I0201 08:12:30.965570 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:12:30 crc kubenswrapper[4650]: E0201 08:12:30.965933 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:12:30 crc kubenswrapper[4650]: I0201 08:12:30.979604 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-585dbc889-h768c_e426a3b9-307e-43fb-b97b-07e1ca7070c0/manager/0.log" Feb 01 08:12:31 crc kubenswrapper[4650]: I0201 08:12:31.307811 4650 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_nova-operator-controller-manager-55bff696bd-ktrgf_e82b6e7a-07b2-4ad3-a94e-70a7c398a401/manager/0.log" Feb 01 08:12:31 crc kubenswrapper[4650]: I0201 08:12:31.534175 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-6687f8d877-jbbh5_db16d8c1-27f1-4922-bfca-e8e605f2add0/manager/0.log" Feb 01 08:12:31 crc kubenswrapper[4650]: I0201 08:12:31.656563 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-59c4b45c4dm7mn9_6073be66-09c1-4fd0-93d2-4e892ca290ff/manager/0.log" Feb 01 08:12:31 crc kubenswrapper[4650]: I0201 08:12:31.906395 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-68b5494db6-rgvsk_29fe6ea0-af9b-4d10-a048-f215ba0ae8f5/operator/0.log" Feb 01 08:12:32 crc kubenswrapper[4650]: I0201 08:12:32.366924 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-wj6vn_f8c846e9-c1e1-4745-8496-931667a06ca0/registry-server/0.log" Feb 01 08:12:32 crc kubenswrapper[4650]: I0201 08:12:32.420595 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-788c46999f-xq7tx_d697c2ab-6e6d-47e6-88c6-588a21de82b5/manager/0.log" Feb 01 08:12:32 crc kubenswrapper[4650]: I0201 08:12:32.646828 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-67485c4bf6-7xmf2_6e1c3cb8-1623-42c8-8b2d-c6bc73e57496/manager/0.log" Feb 01 08:12:32 crc kubenswrapper[4650]: I0201 08:12:32.736754 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b964cf4cd-jsk77_d4c46bd6-4a47-4053-a165-5708ea7cd554/manager/0.log" Feb 01 08:12:32 crc kubenswrapper[4650]: I0201 08:12:32.775071 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-c89v2_c6a2ff37-375f-45b0-bcda-e88907fe869e/operator/0.log" Feb 01 08:12:32 crc kubenswrapper[4650]: I0201 08:12:32.935057 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-657c8cbb9f-9qfrw_4a2041b4-734b-488d-888b-8ee2ca3ecc16/manager/0.log" Feb 01 08:12:33 crc kubenswrapper[4650]: I0201 08:12:33.001466 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-64b5b76f97-fph9h_ef904e35-a87d-44e7-ad35-eddc15e4e6cb/manager/0.log" Feb 01 08:12:33 crc kubenswrapper[4650]: I0201 08:12:33.167342 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-56f8bfcd9f-mgsq9_5b185c7e-2dd4-47a5-aa03-87998587cfa4/manager/0.log" Feb 01 08:12:33 crc kubenswrapper[4650]: I0201 08:12:33.273883 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-564965969-rcncq_d7dfbfb2-7a85-4322-bae0-f6e559687cda/manager/0.log" Feb 01 08:12:37 crc kubenswrapper[4650]: I0201 08:12:37.966111 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:12:37 crc kubenswrapper[4650]: I0201 08:12:37.966404 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:12:37 crc 
kubenswrapper[4650]: I0201 08:12:37.966432 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:12:37 crc kubenswrapper[4650]: I0201 08:12:37.966476 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:12:37 crc kubenswrapper[4650]: I0201 08:12:37.966483 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:12:37 crc kubenswrapper[4650]: E0201 08:12:37.966823 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:12:40 crc kubenswrapper[4650]: I0201 08:12:40.965453 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:12:40 crc kubenswrapper[4650]: E0201 08:12:40.966345 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:12:43 crc kubenswrapper[4650]: I0201 08:12:43.965362 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:12:43 crc kubenswrapper[4650]: I0201 08:12:43.965669 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:12:43 crc kubenswrapper[4650]: E0201 08:12:43.965919 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:12:49 crc kubenswrapper[4650]: I0201 08:12:49.965848 4650 scope.go:117] "RemoveContainer" 
containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:12:49 crc kubenswrapper[4650]: I0201 08:12:49.966276 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:12:49 crc kubenswrapper[4650]: I0201 08:12:49.966308 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:12:49 crc kubenswrapper[4650]: I0201 08:12:49.966388 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:12:49 crc kubenswrapper[4650]: I0201 08:12:49.966399 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:12:49 crc kubenswrapper[4650]: E0201 08:12:49.966974 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:12:52 crc kubenswrapper[4650]: I0201 08:12:52.965986 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:12:52 crc kubenswrapper[4650]: E0201 08:12:52.966973 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:12:54 crc kubenswrapper[4650]: I0201 08:12:54.856568 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-c9v8k_e3b2ad4a-8a06-467b-a83a-f203dd935f9f/control-plane-machine-set-operator/0.log" Feb 01 08:12:54 crc kubenswrapper[4650]: I0201 08:12:54.995865 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-sj7gj_6d720972-b11e-48ba-a5ee-9ceef5808130/kube-rbac-proxy/0.log" Feb 01 08:12:55 crc kubenswrapper[4650]: I0201 08:12:55.041082 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-sj7gj_6d720972-b11e-48ba-a5ee-9ceef5808130/machine-api-operator/0.log" Feb 01 08:12:57 crc kubenswrapper[4650]: I0201 
08:12:57.965387 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:12:57 crc kubenswrapper[4650]: I0201 08:12:57.965919 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:12:57 crc kubenswrapper[4650]: E0201 08:12:57.966195 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:13:02 crc kubenswrapper[4650]: I0201 08:13:02.966301 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:13:02 crc kubenswrapper[4650]: I0201 08:13:02.966649 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:13:02 crc kubenswrapper[4650]: I0201 08:13:02.966678 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:13:02 crc kubenswrapper[4650]: I0201 08:13:02.966740 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:13:02 crc kubenswrapper[4650]: I0201 08:13:02.966749 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:13:02 crc kubenswrapper[4650]: E0201 08:13:02.967211 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:13:03 crc kubenswrapper[4650]: I0201 08:13:03.530366 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:13:03 crc kubenswrapper[4650]: E0201 
08:13:03.530521 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 08:13:03 crc kubenswrapper[4650]: E0201 08:13:03.530578 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 08:15:05.530563198 +0000 UTC m=+3104.253661443 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 08:13:03 crc kubenswrapper[4650]: I0201 08:13:03.965827 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:13:03 crc kubenswrapper[4650]: E0201 08:13:03.966092 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:13:09 crc kubenswrapper[4650]: I0201 08:13:09.870704 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-p2tvq_d4d25501-9155-4295-9f13-aeb46e745f85/cert-manager-controller/0.log" Feb 01 08:13:09 crc kubenswrapper[4650]: I0201 08:13:09.990077 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-wg2pn_bf5de50c-0ff3-45fa-9888-13446bb0a4ab/cert-manager-cainjector/0.log" Feb 01 08:13:10 crc kubenswrapper[4650]: I0201 08:13:10.119440 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-zs6dr_ef362b17-83bc-4543-839b-1451ee91c2c2/cert-manager-webhook/0.log" Feb 01 08:13:12 crc kubenswrapper[4650]: I0201 08:13:12.965511 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:13:12 crc kubenswrapper[4650]: I0201 08:13:12.965948 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:13:12 crc kubenswrapper[4650]: E0201 08:13:12.966403 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:13:13 crc kubenswrapper[4650]: I0201 08:13:13.966119 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:13:13 crc kubenswrapper[4650]: I0201 08:13:13.966481 4650 scope.go:117] "RemoveContainer" 
containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:13:13 crc kubenswrapper[4650]: I0201 08:13:13.966502 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:13:13 crc kubenswrapper[4650]: I0201 08:13:13.966565 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:13:13 crc kubenswrapper[4650]: I0201 08:13:13.966579 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:13:13 crc kubenswrapper[4650]: E0201 08:13:13.967001 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:13:18 crc kubenswrapper[4650]: E0201 08:13:18.727359 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 08:13:18 crc kubenswrapper[4650]: I0201 08:13:18.768003 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:13:18 crc kubenswrapper[4650]: I0201 08:13:18.964751 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:13:18 crc kubenswrapper[4650]: E0201 08:13:18.965122 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:13:24 crc kubenswrapper[4650]: I0201 08:13:24.441062 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-ks8zg_94a95cbd-86c8-4a06-8690-83b9b2451f5a/nmstate-console-plugin/0.log" Feb 01 08:13:24 crc kubenswrapper[4650]: I0201 08:13:24.720771 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-dl4px_1d8abd69-727b-45f0-ab34-9f94c14dc6b7/kube-rbac-proxy/0.log" Feb 01 08:13:24 crc kubenswrapper[4650]: I0201 08:13:24.752906 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-6grf4_3e1c49a2-2b51-4a9c-9ffe-33905e5bc39d/nmstate-handler/0.log" Feb 01 08:13:24 crc kubenswrapper[4650]: I0201 08:13:24.884973 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-dl4px_1d8abd69-727b-45f0-ab34-9f94c14dc6b7/nmstate-metrics/0.log" Feb 01 08:13:24 crc kubenswrapper[4650]: I0201 08:13:24.967620 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-gnrf8_497239ce-dda7-47b2-8a9f-d8b14b4f05a9/nmstate-operator/0.log" Feb 01 08:13:25 crc kubenswrapper[4650]: I0201 08:13:25.056773 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-t6mzq_e49e2e7c-42b1-4bf3-821e-c369f1651bae/nmstate-webhook/0.log" Feb 01 08:13:26 crc kubenswrapper[4650]: I0201 08:13:26.965291 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:13:26 crc kubenswrapper[4650]: I0201 08:13:26.965588 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:13:26 crc kubenswrapper[4650]: I0201 08:13:26.965609 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:13:26 crc kubenswrapper[4650]: I0201 08:13:26.965653 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:13:26 crc kubenswrapper[4650]: I0201 08:13:26.965659 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:13:26 crc kubenswrapper[4650]: E0201 08:13:26.965983 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:13:27 crc kubenswrapper[4650]: I0201 08:13:27.969936 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:13:27 crc kubenswrapper[4650]: I0201 08:13:27.969964 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:13:27 crc kubenswrapper[4650]: E0201 08:13:27.970283 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:13:31 crc kubenswrapper[4650]: I0201 08:13:31.971841 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:13:31 crc kubenswrapper[4650]: E0201 08:13:31.972643 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:13:38 crc kubenswrapper[4650]: I0201 08:13:38.966287 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:13:38 crc kubenswrapper[4650]: I0201 08:13:38.966867 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:13:38 crc kubenswrapper[4650]: I0201 08:13:38.966900 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:13:38 crc kubenswrapper[4650]: I0201 08:13:38.966980 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:13:38 crc kubenswrapper[4650]: I0201 08:13:38.966994 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:13:38 crc kubenswrapper[4650]: E0201 08:13:38.967501 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:13:41 crc kubenswrapper[4650]: I0201 08:13:41.972959 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:13:41 crc kubenswrapper[4650]: I0201 08:13:41.974328 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:13:41 crc kubenswrapper[4650]: E0201 08:13:41.974720 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:13:43 crc kubenswrapper[4650]: I0201 08:13:43.965676 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:13:43 crc kubenswrapper[4650]: E0201 08:13:43.966273 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:13:49 crc kubenswrapper[4650]: I0201 08:13:49.965868 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:13:49 crc kubenswrapper[4650]: I0201 08:13:49.966379 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:13:49 crc kubenswrapper[4650]: I0201 08:13:49.966410 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:13:49 crc kubenswrapper[4650]: I0201 08:13:49.966473 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:13:49 crc kubenswrapper[4650]: I0201 08:13:49.966481 4650 scope.go:117] "RemoveContainer" 
containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:13:49 crc kubenswrapper[4650]: E0201 08:13:49.966921 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:13:53 crc kubenswrapper[4650]: I0201 08:13:53.831551 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-27zgr_5cd9c5b4-5653-49aa-8219-21fa9cdabeca/kube-rbac-proxy/0.log" Feb 01 08:13:53 crc kubenswrapper[4650]: I0201 08:13:53.965976 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:13:53 crc kubenswrapper[4650]: I0201 08:13:53.966000 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:13:53 crc kubenswrapper[4650]: E0201 08:13:53.966254 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.009806 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-27zgr_5cd9c5b4-5653-49aa-8219-21fa9cdabeca/controller/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.086552 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/cp-frr-files/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.204712 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/cp-frr-files/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.242832 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/cp-reloader/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.301673 4650 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/cp-metrics/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.304301 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/cp-reloader/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.497285 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/cp-frr-files/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.569637 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/cp-reloader/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.609991 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/cp-metrics/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.616453 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/cp-metrics/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.730307 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/cp-frr-files/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.776978 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/cp-reloader/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.807256 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/cp-metrics/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.832367 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/controller/0.log" Feb 01 08:13:54 crc kubenswrapper[4650]: I0201 08:13:54.957702 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/frr-metrics/0.log" Feb 01 08:13:55 crc kubenswrapper[4650]: I0201 08:13:55.096586 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/kube-rbac-proxy/0.log" Feb 01 08:13:55 crc kubenswrapper[4650]: I0201 08:13:55.155116 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/kube-rbac-proxy-frr/0.log" Feb 01 08:13:55 crc kubenswrapper[4650]: I0201 08:13:55.228403 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/reloader/0.log" Feb 01 08:13:55 crc kubenswrapper[4650]: I0201 08:13:55.453113 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-vcsl8_ef83d5c0-5353-4e94-854d-e34141ac2982/frr-k8s-webhook-server/0.log" Feb 01 08:13:55 crc kubenswrapper[4650]: I0201 08:13:55.642127 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-84bfdcb548-gw4d9_c969ae59-bacb-4a3e-8849-11b28fcc5bb0/manager/0.log" Feb 01 08:13:55 crc kubenswrapper[4650]: I0201 08:13:55.857106 4650 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-webhook-server-6dc7659c5c-9s5g8_e3570c01-0443-47c6-9b10-dc810c49a308/webhook-server/0.log" Feb 01 08:13:55 crc kubenswrapper[4650]: I0201 08:13:55.983481 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-d8c5z_80fd8481-986f-4374-8c23-7da080041285/kube-rbac-proxy/0.log" Feb 01 08:13:56 crc kubenswrapper[4650]: I0201 08:13:56.003600 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dk5xx_cf9c9412-f2f2-490f-ab66-3d2bd543d519/frr/0.log" Feb 01 08:13:56 crc kubenswrapper[4650]: I0201 08:13:56.325727 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-d8c5z_80fd8481-986f-4374-8c23-7da080041285/speaker/0.log" Feb 01 08:13:58 crc kubenswrapper[4650]: I0201 08:13:58.966255 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:13:58 crc kubenswrapper[4650]: E0201 08:13:58.967438 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:14:03 crc kubenswrapper[4650]: I0201 08:14:03.965944 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:14:03 crc kubenswrapper[4650]: I0201 08:14:03.966542 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:14:03 crc kubenswrapper[4650]: I0201 08:14:03.966564 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:14:03 crc kubenswrapper[4650]: I0201 08:14:03.966609 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:14:03 crc kubenswrapper[4650]: I0201 08:14:03.966615 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:14:03 crc kubenswrapper[4650]: E0201 08:14:03.966982 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" 
podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:14:06 crc kubenswrapper[4650]: I0201 08:14:06.964723 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:14:06 crc kubenswrapper[4650]: I0201 08:14:06.964951 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:14:06 crc kubenswrapper[4650]: E0201 08:14:06.965206 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:14:09 crc kubenswrapper[4650]: I0201 08:14:09.912618 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd_b1bc0d07-354f-441c-bc7b-b1160c88303b/util/0.log" Feb 01 08:14:09 crc kubenswrapper[4650]: I0201 08:14:09.995019 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd_b1bc0d07-354f-441c-bc7b-b1160c88303b/util/0.log" Feb 01 08:14:09 crc kubenswrapper[4650]: I0201 08:14:09.998087 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd_b1bc0d07-354f-441c-bc7b-b1160c88303b/pull/0.log" Feb 01 08:14:10 crc kubenswrapper[4650]: I0201 08:14:10.032512 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd_b1bc0d07-354f-441c-bc7b-b1160c88303b/pull/0.log" Feb 01 08:14:10 crc kubenswrapper[4650]: I0201 08:14:10.163886 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd_b1bc0d07-354f-441c-bc7b-b1160c88303b/extract/0.log" Feb 01 08:14:10 crc kubenswrapper[4650]: I0201 08:14:10.185894 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd_b1bc0d07-354f-441c-bc7b-b1160c88303b/util/0.log" Feb 01 08:14:10 crc kubenswrapper[4650]: I0201 08:14:10.250743 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcf5hnd_b1bc0d07-354f-441c-bc7b-b1160c88303b/pull/0.log" Feb 01 08:14:10 crc kubenswrapper[4650]: I0201 08:14:10.402392 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr_f647c2e5-f410-4aa3-b10b-1ef14f7702ba/util/0.log" Feb 01 08:14:10 crc kubenswrapper[4650]: I0201 08:14:10.544372 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr_f647c2e5-f410-4aa3-b10b-1ef14f7702ba/util/0.log" Feb 01 08:14:10 crc kubenswrapper[4650]: I0201 08:14:10.587416 4650 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr_f647c2e5-f410-4aa3-b10b-1ef14f7702ba/pull/0.log" Feb 01 08:14:10 crc kubenswrapper[4650]: I0201 08:14:10.606628 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr_f647c2e5-f410-4aa3-b10b-1ef14f7702ba/pull/0.log" Feb 01 08:14:10 crc kubenswrapper[4650]: I0201 08:14:10.781128 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr_f647c2e5-f410-4aa3-b10b-1ef14f7702ba/pull/0.log" Feb 01 08:14:10 crc kubenswrapper[4650]: I0201 08:14:10.819637 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr_f647c2e5-f410-4aa3-b10b-1ef14f7702ba/util/0.log" Feb 01 08:14:10 crc kubenswrapper[4650]: I0201 08:14:10.820502 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713fjtqr_f647c2e5-f410-4aa3-b10b-1ef14f7702ba/extract/0.log" Feb 01 08:14:10 crc kubenswrapper[4650]: I0201 08:14:10.945965 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bmfh5_da8c05dd-e911-487b-ac7c-025796f9d671/extract-utilities/0.log" Feb 01 08:14:11 crc kubenswrapper[4650]: I0201 08:14:11.107235 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bmfh5_da8c05dd-e911-487b-ac7c-025796f9d671/extract-utilities/0.log" Feb 01 08:14:11 crc kubenswrapper[4650]: I0201 08:14:11.149379 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bmfh5_da8c05dd-e911-487b-ac7c-025796f9d671/extract-content/0.log" Feb 01 08:14:11 crc kubenswrapper[4650]: I0201 08:14:11.181310 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bmfh5_da8c05dd-e911-487b-ac7c-025796f9d671/extract-content/0.log" Feb 01 08:14:11 crc kubenswrapper[4650]: I0201 08:14:11.329436 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bmfh5_da8c05dd-e911-487b-ac7c-025796f9d671/extract-utilities/0.log" Feb 01 08:14:11 crc kubenswrapper[4650]: I0201 08:14:11.384014 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bmfh5_da8c05dd-e911-487b-ac7c-025796f9d671/extract-content/0.log" Feb 01 08:14:11 crc kubenswrapper[4650]: I0201 08:14:11.672108 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-p6dnj_6e914514-b71c-4747-8dfc-ae1eeef3c8a3/extract-utilities/0.log" Feb 01 08:14:11 crc kubenswrapper[4650]: I0201 08:14:11.759191 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bmfh5_da8c05dd-e911-487b-ac7c-025796f9d671/registry-server/0.log" Feb 01 08:14:11 crc kubenswrapper[4650]: I0201 08:14:11.866361 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-p6dnj_6e914514-b71c-4747-8dfc-ae1eeef3c8a3/extract-utilities/0.log" Feb 01 08:14:11 crc kubenswrapper[4650]: I0201 08:14:11.894703 4650 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-p6dnj_6e914514-b71c-4747-8dfc-ae1eeef3c8a3/extract-content/0.log" Feb 01 08:14:11 crc kubenswrapper[4650]: I0201 08:14:11.898131 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-p6dnj_6e914514-b71c-4747-8dfc-ae1eeef3c8a3/extract-content/0.log" Feb 01 08:14:12 crc kubenswrapper[4650]: I0201 08:14:12.269044 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-p6dnj_6e914514-b71c-4747-8dfc-ae1eeef3c8a3/extract-content/0.log" Feb 01 08:14:12 crc kubenswrapper[4650]: I0201 08:14:12.299099 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-p6dnj_6e914514-b71c-4747-8dfc-ae1eeef3c8a3/extract-utilities/0.log" Feb 01 08:14:12 crc kubenswrapper[4650]: I0201 08:14:12.504719 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-vtbm9_384b3fd0-ca99-47ce-9a89-c6bf2d695888/marketplace-operator/0.log" Feb 01 08:14:12 crc kubenswrapper[4650]: I0201 08:14:12.599440 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-wcj2n_ebbbb2f8-c26c-40e3-a357-0d43dee59901/extract-utilities/0.log" Feb 01 08:14:12 crc kubenswrapper[4650]: I0201 08:14:12.627770 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-p6dnj_6e914514-b71c-4747-8dfc-ae1eeef3c8a3/registry-server/0.log" Feb 01 08:14:12 crc kubenswrapper[4650]: I0201 08:14:12.796873 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-wcj2n_ebbbb2f8-c26c-40e3-a357-0d43dee59901/extract-utilities/0.log" Feb 01 08:14:12 crc kubenswrapper[4650]: I0201 08:14:12.859745 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-wcj2n_ebbbb2f8-c26c-40e3-a357-0d43dee59901/extract-content/0.log" Feb 01 08:14:12 crc kubenswrapper[4650]: I0201 08:14:12.868088 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-wcj2n_ebbbb2f8-c26c-40e3-a357-0d43dee59901/extract-content/0.log" Feb 01 08:14:12 crc kubenswrapper[4650]: I0201 08:14:12.965459 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:14:12 crc kubenswrapper[4650]: E0201 08:14:12.965911 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:14:13 crc kubenswrapper[4650]: I0201 08:14:13.025817 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-wcj2n_ebbbb2f8-c26c-40e3-a357-0d43dee59901/extract-utilities/0.log" Feb 01 08:14:13 crc kubenswrapper[4650]: I0201 08:14:13.033545 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-wcj2n_ebbbb2f8-c26c-40e3-a357-0d43dee59901/extract-content/0.log" Feb 01 08:14:13 crc kubenswrapper[4650]: I0201 08:14:13.194756 4650 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-wcj2n_ebbbb2f8-c26c-40e3-a357-0d43dee59901/registry-server/0.log" Feb 01 08:14:13 crc kubenswrapper[4650]: I0201 08:14:13.281791 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vn96n_fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9/extract-utilities/0.log" Feb 01 08:14:13 crc kubenswrapper[4650]: I0201 08:14:13.449928 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vn96n_fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9/extract-content/0.log" Feb 01 08:14:13 crc kubenswrapper[4650]: I0201 08:14:13.474317 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vn96n_fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9/extract-utilities/0.log" Feb 01 08:14:13 crc kubenswrapper[4650]: I0201 08:14:13.504232 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vn96n_fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9/extract-content/0.log" Feb 01 08:14:13 crc kubenswrapper[4650]: I0201 08:14:13.761747 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vn96n_fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9/extract-utilities/0.log" Feb 01 08:14:13 crc kubenswrapper[4650]: I0201 08:14:13.779043 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vn96n_fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9/extract-content/0.log" Feb 01 08:14:14 crc kubenswrapper[4650]: I0201 08:14:14.048325 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vn96n_fd8d740d-ffe6-47ef-8c1e-8dfdafc2c9b9/registry-server/0.log" Feb 01 08:14:14 crc kubenswrapper[4650]: I0201 08:14:14.966344 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:14:14 crc kubenswrapper[4650]: I0201 08:14:14.966494 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:14:14 crc kubenswrapper[4650]: I0201 08:14:14.966540 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:14:14 crc kubenswrapper[4650]: I0201 08:14:14.966647 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:14:14 crc kubenswrapper[4650]: I0201 08:14:14.966667 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:14:14 crc kubenswrapper[4650]: E0201 08:14:14.967294 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:14:21 crc kubenswrapper[4650]: I0201 08:14:20.999595 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:14:21 crc kubenswrapper[4650]: I0201 08:14:21.000237 4650 scope.go:117] "RemoveContainer" containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:14:21 crc kubenswrapper[4650]: E0201 08:14:21.212750 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:14:21 crc kubenswrapper[4650]: I0201 08:14:21.314757 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105"} Feb 01 08:14:21 crc kubenswrapper[4650]: I0201 08:14:21.315566 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:14:21 crc kubenswrapper[4650]: E0201 08:14:21.315838 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:14:21 crc kubenswrapper[4650]: I0201 08:14:21.316005 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:14:22 crc kubenswrapper[4650]: I0201 08:14:22.321285 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:14:22 crc kubenswrapper[4650]: E0201 08:14:22.321828 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:14:23 crc kubenswrapper[4650]: I0201 08:14:23.333500 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" exitCode=1 Feb 01 08:14:23 crc kubenswrapper[4650]: I0201 08:14:23.333601 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105"} Feb 01 08:14:23 crc kubenswrapper[4650]: I0201 08:14:23.334678 4650 scope.go:117] "RemoveContainer" 
containerID="64ec2b8a4cd5bac9fe3e34193c187e4b3feacdf86aeaa4158c8eaa5bab5de76d" Feb 01 08:14:23 crc kubenswrapper[4650]: I0201 08:14:23.336061 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:14:23 crc kubenswrapper[4650]: I0201 08:14:23.336108 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:14:23 crc kubenswrapper[4650]: E0201 08:14:23.336563 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:14:24 crc kubenswrapper[4650]: I0201 08:14:24.800337 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:14:24 crc kubenswrapper[4650]: I0201 08:14:24.801164 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:14:24 crc kubenswrapper[4650]: I0201 08:14:24.801188 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:14:24 crc kubenswrapper[4650]: E0201 08:14:24.801632 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:14:24 crc kubenswrapper[4650]: I0201 08:14:24.965486 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:14:24 crc kubenswrapper[4650]: E0201 08:14:24.966018 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:14:27 crc kubenswrapper[4650]: I0201 08:14:27.966508 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:14:27 crc kubenswrapper[4650]: I0201 08:14:27.966955 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:14:27 crc kubenswrapper[4650]: I0201 08:14:27.967001 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:14:27 crc kubenswrapper[4650]: I0201 08:14:27.967181 4650 
scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:14:27 crc kubenswrapper[4650]: I0201 08:14:27.967197 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:14:27 crc kubenswrapper[4650]: E0201 08:14:27.968128 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:14:35 crc kubenswrapper[4650]: I0201 08:14:35.965346 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:14:35 crc kubenswrapper[4650]: E0201 08:14:35.965997 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:14:38 crc kubenswrapper[4650]: I0201 08:14:38.966139 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:14:38 crc kubenswrapper[4650]: I0201 08:14:38.966700 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:14:38 crc kubenswrapper[4650]: I0201 08:14:38.966757 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:14:38 crc kubenswrapper[4650]: I0201 08:14:38.966805 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:14:38 crc kubenswrapper[4650]: I0201 08:14:38.966811 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:14:38 crc kubenswrapper[4650]: E0201 08:14:38.967374 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:14:39 crc kubenswrapper[4650]: I0201 08:14:39.965170 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:14:39 crc kubenswrapper[4650]: I0201 08:14:39.965662 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:14:39 crc kubenswrapper[4650]: E0201 08:14:39.965990 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:14:49 crc kubenswrapper[4650]: I0201 08:14:49.966317 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:14:49 crc kubenswrapper[4650]: I0201 08:14:49.967049 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:14:49 crc kubenswrapper[4650]: I0201 08:14:49.967099 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:14:49 crc kubenswrapper[4650]: I0201 08:14:49.967208 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:14:49 crc kubenswrapper[4650]: I0201 08:14:49.967222 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:14:49 crc kubenswrapper[4650]: E0201 08:14:49.968017 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:14:50 crc kubenswrapper[4650]: I0201 08:14:50.965317 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:14:50 crc kubenswrapper[4650]: I0201 08:14:50.965352 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:14:50 crc kubenswrapper[4650]: E0201 08:14:50.965649 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:14:50 crc kubenswrapper[4650]: I0201 08:14:50.966076 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:14:50 crc kubenswrapper[4650]: E0201 08:14:50.966648 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.193200 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj"] Feb 01 08:15:00 crc kubenswrapper[4650]: E0201 08:15:00.194415 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20ac6da2-62fc-43ab-86f0-b5d19f3f5178" containerName="container-00" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.194443 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="20ac6da2-62fc-43ab-86f0-b5d19f3f5178" containerName="container-00" Feb 01 08:15:00 crc kubenswrapper[4650]: E0201 08:15:00.194469 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4ea2a73-8081-43e2-b8d4-9913f624e2cb" containerName="extract-utilities" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.194481 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4ea2a73-8081-43e2-b8d4-9913f624e2cb" containerName="extract-utilities" Feb 01 08:15:00 crc kubenswrapper[4650]: E0201 08:15:00.194503 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4ea2a73-8081-43e2-b8d4-9913f624e2cb" containerName="registry-server" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.194515 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4ea2a73-8081-43e2-b8d4-9913f624e2cb" containerName="registry-server" Feb 01 08:15:00 crc kubenswrapper[4650]: E0201 08:15:00.194566 4650 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4ea2a73-8081-43e2-b8d4-9913f624e2cb" containerName="extract-content" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.194579 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4ea2a73-8081-43e2-b8d4-9913f624e2cb" containerName="extract-content" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.194887 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="20ac6da2-62fc-43ab-86f0-b5d19f3f5178" containerName="container-00" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.194909 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4ea2a73-8081-43e2-b8d4-9913f624e2cb" containerName="registry-server" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.195875 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.240928 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj"] Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.241429 4650 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.241846 4650 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.339891 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fb2c796f-4a53-4317-8630-4dca312984b7-secret-volume\") pod \"collect-profiles-29498895-wcqpj\" (UID: \"fb2c796f-4a53-4317-8630-4dca312984b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.340200 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74sw7\" (UniqueName: \"kubernetes.io/projected/fb2c796f-4a53-4317-8630-4dca312984b7-kube-api-access-74sw7\") pod \"collect-profiles-29498895-wcqpj\" (UID: \"fb2c796f-4a53-4317-8630-4dca312984b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.340311 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fb2c796f-4a53-4317-8630-4dca312984b7-config-volume\") pod \"collect-profiles-29498895-wcqpj\" (UID: \"fb2c796f-4a53-4317-8630-4dca312984b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.441857 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fb2c796f-4a53-4317-8630-4dca312984b7-secret-volume\") pod \"collect-profiles-29498895-wcqpj\" (UID: \"fb2c796f-4a53-4317-8630-4dca312984b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.442017 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74sw7\" (UniqueName: 
\"kubernetes.io/projected/fb2c796f-4a53-4317-8630-4dca312984b7-kube-api-access-74sw7\") pod \"collect-profiles-29498895-wcqpj\" (UID: \"fb2c796f-4a53-4317-8630-4dca312984b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.442100 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fb2c796f-4a53-4317-8630-4dca312984b7-config-volume\") pod \"collect-profiles-29498895-wcqpj\" (UID: \"fb2c796f-4a53-4317-8630-4dca312984b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.443747 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fb2c796f-4a53-4317-8630-4dca312984b7-config-volume\") pod \"collect-profiles-29498895-wcqpj\" (UID: \"fb2c796f-4a53-4317-8630-4dca312984b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.465674 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fb2c796f-4a53-4317-8630-4dca312984b7-secret-volume\") pod \"collect-profiles-29498895-wcqpj\" (UID: \"fb2c796f-4a53-4317-8630-4dca312984b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.469951 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74sw7\" (UniqueName: \"kubernetes.io/projected/fb2c796f-4a53-4317-8630-4dca312984b7-kube-api-access-74sw7\") pod \"collect-profiles-29498895-wcqpj\" (UID: \"fb2c796f-4a53-4317-8630-4dca312984b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" Feb 01 08:15:00 crc kubenswrapper[4650]: I0201 08:15:00.528779 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" Feb 01 08:15:01 crc kubenswrapper[4650]: I0201 08:15:01.061363 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj"] Feb 01 08:15:01 crc kubenswrapper[4650]: W0201 08:15:01.079182 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb2c796f_4a53_4317_8630_4dca312984b7.slice/crio-e2f0d9f390c2a2b49e1ed54f715918a41a710fca62fae713cb1cf6685607967e WatchSource:0}: Error finding container e2f0d9f390c2a2b49e1ed54f715918a41a710fca62fae713cb1cf6685607967e: Status 404 returned error can't find the container with id e2f0d9f390c2a2b49e1ed54f715918a41a710fca62fae713cb1cf6685607967e Feb 01 08:15:01 crc kubenswrapper[4650]: I0201 08:15:01.656913 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" event={"ID":"fb2c796f-4a53-4317-8630-4dca312984b7","Type":"ContainerStarted","Data":"87a31399f5fc5e540d1410bb8b8ca11e4c7caf8a10e33838f84895577852ee95"} Feb 01 08:15:01 crc kubenswrapper[4650]: I0201 08:15:01.657356 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" event={"ID":"fb2c796f-4a53-4317-8630-4dca312984b7","Type":"ContainerStarted","Data":"e2f0d9f390c2a2b49e1ed54f715918a41a710fca62fae713cb1cf6685607967e"} Feb 01 08:15:01 crc kubenswrapper[4650]: I0201 08:15:01.685387 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" podStartSLOduration=1.685365757 podStartE2EDuration="1.685365757s" podCreationTimestamp="2026-02-01 08:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-01 08:15:01.677263641 +0000 UTC m=+3100.400361886" watchObservedRunningTime="2026-02-01 08:15:01.685365757 +0000 UTC m=+3100.408464002" Feb 01 08:15:01 crc kubenswrapper[4650]: I0201 08:15:01.972654 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:15:01 crc kubenswrapper[4650]: I0201 08:15:01.973004 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:15:01 crc kubenswrapper[4650]: E0201 08:15:01.973321 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:15:02 crc kubenswrapper[4650]: I0201 08:15:02.672303 4650 generic.go:334] "Generic (PLEG): container finished" podID="fb2c796f-4a53-4317-8630-4dca312984b7" containerID="87a31399f5fc5e540d1410bb8b8ca11e4c7caf8a10e33838f84895577852ee95" exitCode=0 Feb 01 08:15:02 crc kubenswrapper[4650]: I0201 08:15:02.672346 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" event={"ID":"fb2c796f-4a53-4317-8630-4dca312984b7","Type":"ContainerDied","Data":"87a31399f5fc5e540d1410bb8b8ca11e4c7caf8a10e33838f84895577852ee95"} Feb 01 08:15:02 crc kubenswrapper[4650]: I0201 08:15:02.967086 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:15:02 crc kubenswrapper[4650]: E0201 08:15:02.967470 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:15:03 crc kubenswrapper[4650]: I0201 08:15:03.965873 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:15:03 crc kubenswrapper[4650]: I0201 08:15:03.966302 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:15:03 crc kubenswrapper[4650]: I0201 08:15:03.966326 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:15:03 crc kubenswrapper[4650]: I0201 08:15:03.966377 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:15:03 crc kubenswrapper[4650]: I0201 08:15:03.966384 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.096580 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.129156 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-74sw7\" (UniqueName: \"kubernetes.io/projected/fb2c796f-4a53-4317-8630-4dca312984b7-kube-api-access-74sw7\") pod \"fb2c796f-4a53-4317-8630-4dca312984b7\" (UID: \"fb2c796f-4a53-4317-8630-4dca312984b7\") " Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.129206 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fb2c796f-4a53-4317-8630-4dca312984b7-config-volume\") pod \"fb2c796f-4a53-4317-8630-4dca312984b7\" (UID: \"fb2c796f-4a53-4317-8630-4dca312984b7\") " Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.129253 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fb2c796f-4a53-4317-8630-4dca312984b7-secret-volume\") pod \"fb2c796f-4a53-4317-8630-4dca312984b7\" (UID: \"fb2c796f-4a53-4317-8630-4dca312984b7\") " Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.134530 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb2c796f-4a53-4317-8630-4dca312984b7-config-volume" (OuterVolumeSpecName: "config-volume") pod "fb2c796f-4a53-4317-8630-4dca312984b7" (UID: "fb2c796f-4a53-4317-8630-4dca312984b7"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.137904 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb2c796f-4a53-4317-8630-4dca312984b7-kube-api-access-74sw7" (OuterVolumeSpecName: "kube-api-access-74sw7") pod "fb2c796f-4a53-4317-8630-4dca312984b7" (UID: "fb2c796f-4a53-4317-8630-4dca312984b7"). InnerVolumeSpecName "kube-api-access-74sw7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.139334 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb2c796f-4a53-4317-8630-4dca312984b7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "fb2c796f-4a53-4317-8630-4dca312984b7" (UID: "fb2c796f-4a53-4317-8630-4dca312984b7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 01 08:15:04 crc kubenswrapper[4650]: E0201 08:15:04.189343 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.230894 4650 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fb2c796f-4a53-4317-8630-4dca312984b7-config-volume\") on node \"crc\" DevicePath \"\"" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.230919 4650 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fb2c796f-4a53-4317-8630-4dca312984b7-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.230949 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-74sw7\" (UniqueName: \"kubernetes.io/projected/fb2c796f-4a53-4317-8630-4dca312984b7-kube-api-access-74sw7\") on node \"crc\" DevicePath \"\"" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.705215 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b"} Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.706353 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.706473 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.706624 4650 scope.go:117] 
"RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.706640 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:15:04 crc kubenswrapper[4650]: E0201 08:15:04.707331 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.711991 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" event={"ID":"fb2c796f-4a53-4317-8630-4dca312984b7","Type":"ContainerDied","Data":"e2f0d9f390c2a2b49e1ed54f715918a41a710fca62fae713cb1cf6685607967e"} Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.712068 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29498895-wcqpj" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.712091 4650 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2f0d9f390c2a2b49e1ed54f715918a41a710fca62fae713cb1cf6685607967e" Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.821067 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz"] Feb 01 08:15:04 crc kubenswrapper[4650]: I0201 08:15:04.829828 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29498850-6hlsz"] Feb 01 08:15:05 crc kubenswrapper[4650]: I0201 08:15:05.567652 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:15:05 crc kubenswrapper[4650]: E0201 08:15:05.567887 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 08:15:05 crc kubenswrapper[4650]: E0201 08:15:05.567984 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 08:17:07.567961099 +0000 UTC m=+3226.291059354 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 08:15:05 crc kubenswrapper[4650]: I0201 08:15:05.985659 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7091e599-b67c-44d8-945c-329bac31dd6d" path="/var/lib/kubelet/pods/7091e599-b67c-44d8-945c-329bac31dd6d/volumes" Feb 01 08:15:15 crc kubenswrapper[4650]: I0201 08:15:15.965851 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:15:15 crc kubenswrapper[4650]: I0201 08:15:15.967613 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:15:15 crc kubenswrapper[4650]: E0201 08:15:15.968009 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:15:16 crc kubenswrapper[4650]: I0201 08:15:16.965961 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:15:16 crc kubenswrapper[4650]: E0201 08:15:16.966676 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:15:18 crc kubenswrapper[4650]: I0201 08:15:18.828322 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" exitCode=1 Feb 01 08:15:18 crc kubenswrapper[4650]: I0201 08:15:18.828365 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b"} Feb 01 08:15:18 crc kubenswrapper[4650]: I0201 08:15:18.828397 4650 scope.go:117] "RemoveContainer" containerID="4ff9e6c47350a106be3a404854e5b6ae27ad7fdd5ea905ae4705d7207f51df83" Feb 01 08:15:18 crc kubenswrapper[4650]: I0201 08:15:18.831394 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:15:18 crc kubenswrapper[4650]: I0201 08:15:18.831456 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:15:18 crc kubenswrapper[4650]: I0201 08:15:18.831476 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:15:18 crc kubenswrapper[4650]: I0201 08:15:18.834925 4650 
scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:15:18 crc kubenswrapper[4650]: I0201 08:15:18.834944 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:15:18 crc kubenswrapper[4650]: E0201 08:15:18.835538 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:15:21 crc kubenswrapper[4650]: E0201 08:15:21.770618 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 08:15:21 crc kubenswrapper[4650]: I0201 08:15:21.869596 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:15:27 crc kubenswrapper[4650]: I0201 08:15:27.766354 4650 scope.go:117] "RemoveContainer" containerID="bd95859aed792eacf3a5edd64fccdcd14a6c094b34f7f182e8f0adbbb000c6ac" Feb 01 08:15:28 crc kubenswrapper[4650]: I0201 08:15:28.965319 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:15:28 crc kubenswrapper[4650]: I0201 08:15:28.965655 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:15:28 crc kubenswrapper[4650]: E0201 08:15:28.965923 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:15:31 crc kubenswrapper[4650]: I0201 08:15:31.971539 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:15:31 crc kubenswrapper[4650]: E0201 08:15:31.972366 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:15:33 crc kubenswrapper[4650]: I0201 08:15:33.970855 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:15:33 crc kubenswrapper[4650]: I0201 08:15:33.971818 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:15:33 crc kubenswrapper[4650]: I0201 08:15:33.971913 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:15:33 crc kubenswrapper[4650]: I0201 08:15:33.972067 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:15:33 crc kubenswrapper[4650]: I0201 08:15:33.972183 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:15:33 crc kubenswrapper[4650]: E0201 08:15:33.972618 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater 
pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:15:40 crc kubenswrapper[4650]: I0201 08:15:40.966444 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:15:40 crc kubenswrapper[4650]: I0201 08:15:40.968303 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:15:40 crc kubenswrapper[4650]: E0201 08:15:40.968796 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:15:44 crc kubenswrapper[4650]: I0201 08:15:44.965397 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:15:44 crc kubenswrapper[4650]: E0201 08:15:44.966838 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:15:47 crc kubenswrapper[4650]: I0201 08:15:47.970037 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:15:47 crc kubenswrapper[4650]: I0201 08:15:47.970393 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:15:47 crc kubenswrapper[4650]: I0201 08:15:47.970414 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:15:47 crc kubenswrapper[4650]: I0201 08:15:47.970461 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:15:47 crc kubenswrapper[4650]: I0201 08:15:47.970468 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:15:47 crc kubenswrapper[4650]: E0201 08:15:47.970832 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:15:49 crc kubenswrapper[4650]: I0201 08:15:49.568186 4650 generic.go:334] "Generic (PLEG): container finished" podID="c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd" containerID="e413101f86d5c585facc8dd355e8a9db57b708b7c0408ed3f7dbfe0d300ab41d" exitCode=0 Feb 01 08:15:49 crc kubenswrapper[4650]: I0201 08:15:49.568258 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzzm4/must-gather-zc9sw" event={"ID":"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd","Type":"ContainerDied","Data":"e413101f86d5c585facc8dd355e8a9db57b708b7c0408ed3f7dbfe0d300ab41d"} Feb 01 08:15:49 crc kubenswrapper[4650]: I0201 08:15:49.569249 4650 scope.go:117] "RemoveContainer" containerID="e413101f86d5c585facc8dd355e8a9db57b708b7c0408ed3f7dbfe0d300ab41d" Feb 01 08:15:50 crc kubenswrapper[4650]: I0201 08:15:50.598513 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-bzzm4_must-gather-zc9sw_c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd/gather/0.log" Feb 01 08:15:52 crc kubenswrapper[4650]: I0201 08:15:52.965977 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:15:52 crc kubenswrapper[4650]: I0201 08:15:52.968168 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:15:52 crc kubenswrapper[4650]: E0201 08:15:52.968832 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.057868 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-bzzm4/must-gather-zc9sw"] Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.058648 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-bzzm4/must-gather-zc9sw" podUID="c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd" containerName="copy" containerID="cri-o://ff712753a0593b2cb91375f8c67294688258203e97fa63e2471a610c73164a7f" gracePeriod=2 Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.068318 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-bzzm4/must-gather-zc9sw"] Feb 01 08:15:58 crc 
kubenswrapper[4650]: I0201 08:15:58.486733 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-bzzm4_must-gather-zc9sw_c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd/copy/0.log" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.487633 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzzm4/must-gather-zc9sw" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.657499 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd-must-gather-output\") pod \"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd\" (UID: \"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd\") " Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.657615 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hbcm\" (UniqueName: \"kubernetes.io/projected/c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd-kube-api-access-5hbcm\") pod \"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd\" (UID: \"c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd\") " Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.662320 4650 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-bzzm4_must-gather-zc9sw_c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd/copy/0.log" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.662673 4650 generic.go:334] "Generic (PLEG): container finished" podID="c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd" containerID="ff712753a0593b2cb91375f8c67294688258203e97fa63e2471a610c73164a7f" exitCode=143 Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.662722 4650 scope.go:117] "RemoveContainer" containerID="ff712753a0593b2cb91375f8c67294688258203e97fa63e2471a610c73164a7f" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.662736 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzzm4/must-gather-zc9sw" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.664783 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd-kube-api-access-5hbcm" (OuterVolumeSpecName: "kube-api-access-5hbcm") pod "c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd" (UID: "c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd"). InnerVolumeSpecName "kube-api-access-5hbcm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.720110 4650 scope.go:117] "RemoveContainer" containerID="e413101f86d5c585facc8dd355e8a9db57b708b7c0408ed3f7dbfe0d300ab41d" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.760300 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hbcm\" (UniqueName: \"kubernetes.io/projected/c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd-kube-api-access-5hbcm\") on node \"crc\" DevicePath \"\"" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.795404 4650 scope.go:117] "RemoveContainer" containerID="ff712753a0593b2cb91375f8c67294688258203e97fa63e2471a610c73164a7f" Feb 01 08:15:58 crc kubenswrapper[4650]: E0201 08:15:58.797734 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff712753a0593b2cb91375f8c67294688258203e97fa63e2471a610c73164a7f\": container with ID starting with ff712753a0593b2cb91375f8c67294688258203e97fa63e2471a610c73164a7f not found: ID does not exist" containerID="ff712753a0593b2cb91375f8c67294688258203e97fa63e2471a610c73164a7f" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.797882 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff712753a0593b2cb91375f8c67294688258203e97fa63e2471a610c73164a7f"} err="failed to get container status \"ff712753a0593b2cb91375f8c67294688258203e97fa63e2471a610c73164a7f\": rpc error: code = NotFound desc = could not find container \"ff712753a0593b2cb91375f8c67294688258203e97fa63e2471a610c73164a7f\": container with ID starting with ff712753a0593b2cb91375f8c67294688258203e97fa63e2471a610c73164a7f not found: ID does not exist" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.797978 4650 scope.go:117] "RemoveContainer" containerID="e413101f86d5c585facc8dd355e8a9db57b708b7c0408ed3f7dbfe0d300ab41d" Feb 01 08:15:58 crc kubenswrapper[4650]: E0201 08:15:58.798576 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e413101f86d5c585facc8dd355e8a9db57b708b7c0408ed3f7dbfe0d300ab41d\": container with ID starting with e413101f86d5c585facc8dd355e8a9db57b708b7c0408ed3f7dbfe0d300ab41d not found: ID does not exist" containerID="e413101f86d5c585facc8dd355e8a9db57b708b7c0408ed3f7dbfe0d300ab41d" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.798621 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e413101f86d5c585facc8dd355e8a9db57b708b7c0408ed3f7dbfe0d300ab41d"} err="failed to get container status \"e413101f86d5c585facc8dd355e8a9db57b708b7c0408ed3f7dbfe0d300ab41d\": rpc error: code = NotFound desc = could not find container \"e413101f86d5c585facc8dd355e8a9db57b708b7c0408ed3f7dbfe0d300ab41d\": container with ID starting with e413101f86d5c585facc8dd355e8a9db57b708b7c0408ed3f7dbfe0d300ab41d not found: ID does not exist" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.801684 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd" (UID: "c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:15:58 crc kubenswrapper[4650]: I0201 08:15:58.861666 4650 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd-must-gather-output\") on node \"crc\" DevicePath \"\"" Feb 01 08:15:59 crc kubenswrapper[4650]: I0201 08:15:59.969786 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:15:59 crc kubenswrapper[4650]: E0201 08:15:59.970144 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:15:59 crc kubenswrapper[4650]: I0201 08:15:59.970701 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:15:59 crc kubenswrapper[4650]: I0201 08:15:59.970764 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:15:59 crc kubenswrapper[4650]: I0201 08:15:59.970786 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:15:59 crc kubenswrapper[4650]: I0201 08:15:59.970829 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:15:59 crc kubenswrapper[4650]: I0201 08:15:59.970836 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:15:59 crc kubenswrapper[4650]: E0201 08:15:59.971118 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:15:59 crc kubenswrapper[4650]: I0201 08:15:59.979578 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd" path="/var/lib/kubelet/pods/c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd/volumes" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.181564 4650 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/community-operators-xccvg"] Feb 01 08:16:02 crc kubenswrapper[4650]: E0201 08:16:02.182536 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd" containerName="gather" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.182548 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd" containerName="gather" Feb 01 08:16:02 crc kubenswrapper[4650]: E0201 08:16:02.182571 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd" containerName="copy" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.182577 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd" containerName="copy" Feb 01 08:16:02 crc kubenswrapper[4650]: E0201 08:16:02.182592 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb2c796f-4a53-4317-8630-4dca312984b7" containerName="collect-profiles" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.182599 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb2c796f-4a53-4317-8630-4dca312984b7" containerName="collect-profiles" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.182800 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd" containerName="gather" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.182810 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb2c796f-4a53-4317-8630-4dca312984b7" containerName="collect-profiles" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.182820 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5948eb4-b396-4d5c-9d9f-2be1ad9e2afd" containerName="copy" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.184050 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.195606 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xccvg"] Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.335932 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cx84d\" (UniqueName: \"kubernetes.io/projected/7fa7677f-e39c-4496-9e1f-67f096256b84-kube-api-access-cx84d\") pod \"community-operators-xccvg\" (UID: \"7fa7677f-e39c-4496-9e1f-67f096256b84\") " pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.336001 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fa7677f-e39c-4496-9e1f-67f096256b84-catalog-content\") pod \"community-operators-xccvg\" (UID: \"7fa7677f-e39c-4496-9e1f-67f096256b84\") " pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.336075 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fa7677f-e39c-4496-9e1f-67f096256b84-utilities\") pod \"community-operators-xccvg\" (UID: \"7fa7677f-e39c-4496-9e1f-67f096256b84\") " pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.438197 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cx84d\" (UniqueName: \"kubernetes.io/projected/7fa7677f-e39c-4496-9e1f-67f096256b84-kube-api-access-cx84d\") pod \"community-operators-xccvg\" (UID: \"7fa7677f-e39c-4496-9e1f-67f096256b84\") " pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.438262 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fa7677f-e39c-4496-9e1f-67f096256b84-catalog-content\") pod \"community-operators-xccvg\" (UID: \"7fa7677f-e39c-4496-9e1f-67f096256b84\") " pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.438302 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fa7677f-e39c-4496-9e1f-67f096256b84-utilities\") pod \"community-operators-xccvg\" (UID: \"7fa7677f-e39c-4496-9e1f-67f096256b84\") " pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.438641 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fa7677f-e39c-4496-9e1f-67f096256b84-catalog-content\") pod \"community-operators-xccvg\" (UID: \"7fa7677f-e39c-4496-9e1f-67f096256b84\") " pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.438760 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fa7677f-e39c-4496-9e1f-67f096256b84-utilities\") pod \"community-operators-xccvg\" (UID: \"7fa7677f-e39c-4496-9e1f-67f096256b84\") " pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.461846 4650 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-cx84d\" (UniqueName: \"kubernetes.io/projected/7fa7677f-e39c-4496-9e1f-67f096256b84-kube-api-access-cx84d\") pod \"community-operators-xccvg\" (UID: \"7fa7677f-e39c-4496-9e1f-67f096256b84\") " pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.539478 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:02 crc kubenswrapper[4650]: I0201 08:16:02.948283 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xccvg"] Feb 01 08:16:03 crc kubenswrapper[4650]: I0201 08:16:03.710526 4650 generic.go:334] "Generic (PLEG): container finished" podID="7fa7677f-e39c-4496-9e1f-67f096256b84" containerID="410ba27949922f936da5802862b55c0ea8f37c99062b33f4b8c5691bc3fa1249" exitCode=0 Feb 01 08:16:03 crc kubenswrapper[4650]: I0201 08:16:03.710674 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xccvg" event={"ID":"7fa7677f-e39c-4496-9e1f-67f096256b84","Type":"ContainerDied","Data":"410ba27949922f936da5802862b55c0ea8f37c99062b33f4b8c5691bc3fa1249"} Feb 01 08:16:03 crc kubenswrapper[4650]: I0201 08:16:03.710983 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xccvg" event={"ID":"7fa7677f-e39c-4496-9e1f-67f096256b84","Type":"ContainerStarted","Data":"c0bdfedca84a8da292835337e894aa1e7f70932660f9468e5a4c5a1028c5adcb"} Feb 01 08:16:03 crc kubenswrapper[4650]: I0201 08:16:03.713802 4650 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 01 08:16:04 crc kubenswrapper[4650]: I0201 08:16:04.719333 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xccvg" event={"ID":"7fa7677f-e39c-4496-9e1f-67f096256b84","Type":"ContainerStarted","Data":"9ba954de90e00c3ab68d91ce0c8fe3cd207e8bc07fae52fd5edf9cf0f3f651b0"} Feb 01 08:16:05 crc kubenswrapper[4650]: I0201 08:16:05.732579 4650 generic.go:334] "Generic (PLEG): container finished" podID="7fa7677f-e39c-4496-9e1f-67f096256b84" containerID="9ba954de90e00c3ab68d91ce0c8fe3cd207e8bc07fae52fd5edf9cf0f3f651b0" exitCode=0 Feb 01 08:16:05 crc kubenswrapper[4650]: I0201 08:16:05.732657 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xccvg" event={"ID":"7fa7677f-e39c-4496-9e1f-67f096256b84","Type":"ContainerDied","Data":"9ba954de90e00c3ab68d91ce0c8fe3cd207e8bc07fae52fd5edf9cf0f3f651b0"} Feb 01 08:16:05 crc kubenswrapper[4650]: I0201 08:16:05.961830 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4vgmm"] Feb 01 08:16:05 crc kubenswrapper[4650]: I0201 08:16:05.966116 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.068989 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4vgmm"] Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.117315 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92f9bbf3-e779-40be-81d3-7058073fd3d2-catalog-content\") pod \"certified-operators-4vgmm\" (UID: \"92f9bbf3-e779-40be-81d3-7058073fd3d2\") " pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.117687 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92f9bbf3-e779-40be-81d3-7058073fd3d2-utilities\") pod \"certified-operators-4vgmm\" (UID: \"92f9bbf3-e779-40be-81d3-7058073fd3d2\") " pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.117802 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfmht\" (UniqueName: \"kubernetes.io/projected/92f9bbf3-e779-40be-81d3-7058073fd3d2-kube-api-access-jfmht\") pod \"certified-operators-4vgmm\" (UID: \"92f9bbf3-e779-40be-81d3-7058073fd3d2\") " pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.219277 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92f9bbf3-e779-40be-81d3-7058073fd3d2-utilities\") pod \"certified-operators-4vgmm\" (UID: \"92f9bbf3-e779-40be-81d3-7058073fd3d2\") " pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.219621 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfmht\" (UniqueName: \"kubernetes.io/projected/92f9bbf3-e779-40be-81d3-7058073fd3d2-kube-api-access-jfmht\") pod \"certified-operators-4vgmm\" (UID: \"92f9bbf3-e779-40be-81d3-7058073fd3d2\") " pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.219742 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92f9bbf3-e779-40be-81d3-7058073fd3d2-catalog-content\") pod \"certified-operators-4vgmm\" (UID: \"92f9bbf3-e779-40be-81d3-7058073fd3d2\") " pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.220382 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92f9bbf3-e779-40be-81d3-7058073fd3d2-catalog-content\") pod \"certified-operators-4vgmm\" (UID: \"92f9bbf3-e779-40be-81d3-7058073fd3d2\") " pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.220548 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92f9bbf3-e779-40be-81d3-7058073fd3d2-utilities\") pod \"certified-operators-4vgmm\" (UID: \"92f9bbf3-e779-40be-81d3-7058073fd3d2\") " pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.241653 4650 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jfmht\" (UniqueName: \"kubernetes.io/projected/92f9bbf3-e779-40be-81d3-7058073fd3d2-kube-api-access-jfmht\") pod \"certified-operators-4vgmm\" (UID: \"92f9bbf3-e779-40be-81d3-7058073fd3d2\") " pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.318384 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.691388 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4vgmm"] Feb 01 08:16:06 crc kubenswrapper[4650]: W0201 08:16:06.728080 4650 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92f9bbf3_e779_40be_81d3_7058073fd3d2.slice/crio-b186513bc632b4dc571431aa9eb4c12fcf2a4d19a173f2dad3f78f260742a378 WatchSource:0}: Error finding container b186513bc632b4dc571431aa9eb4c12fcf2a4d19a173f2dad3f78f260742a378: Status 404 returned error can't find the container with id b186513bc632b4dc571431aa9eb4c12fcf2a4d19a173f2dad3f78f260742a378 Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.750538 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vgmm" event={"ID":"92f9bbf3-e779-40be-81d3-7058073fd3d2","Type":"ContainerStarted","Data":"b186513bc632b4dc571431aa9eb4c12fcf2a4d19a173f2dad3f78f260742a378"} Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.753536 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xccvg" event={"ID":"7fa7677f-e39c-4496-9e1f-67f096256b84","Type":"ContainerStarted","Data":"9ce20c36d03162fa8da925e60715f9faa1e4571cf8e059e1871a3f9ca64dc80e"} Feb 01 08:16:06 crc kubenswrapper[4650]: I0201 08:16:06.784056 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xccvg" podStartSLOduration=2.252709016 podStartE2EDuration="4.784041941s" podCreationTimestamp="2026-02-01 08:16:02 +0000 UTC" firstStartedPulling="2026-02-01 08:16:03.713547177 +0000 UTC m=+3162.436645432" lastFinishedPulling="2026-02-01 08:16:06.244880112 +0000 UTC m=+3164.967978357" observedRunningTime="2026-02-01 08:16:06.780312032 +0000 UTC m=+3165.503410287" watchObservedRunningTime="2026-02-01 08:16:06.784041941 +0000 UTC m=+3165.507140186" Feb 01 08:16:07 crc kubenswrapper[4650]: I0201 08:16:07.762842 4650 generic.go:334] "Generic (PLEG): container finished" podID="92f9bbf3-e779-40be-81d3-7058073fd3d2" containerID="a67f32ada762ecdc62f4d7458eeb73540004355d13c6595b0dbb4abd31100ece" exitCode=0 Feb 01 08:16:07 crc kubenswrapper[4650]: I0201 08:16:07.762949 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vgmm" event={"ID":"92f9bbf3-e779-40be-81d3-7058073fd3d2","Type":"ContainerDied","Data":"a67f32ada762ecdc62f4d7458eeb73540004355d13c6595b0dbb4abd31100ece"} Feb 01 08:16:07 crc kubenswrapper[4650]: I0201 08:16:07.966090 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:16:07 crc kubenswrapper[4650]: I0201 08:16:07.966116 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:16:07 crc kubenswrapper[4650]: E0201 08:16:07.966339 4650 pod_workers.go:1301] "Error syncing pod, skipping" 
err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:16:08 crc kubenswrapper[4650]: I0201 08:16:08.772660 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vgmm" event={"ID":"92f9bbf3-e779-40be-81d3-7058073fd3d2","Type":"ContainerStarted","Data":"e34ab3d24c60f351d4cc2e8845685de8c14922aec53a735bf818dfbd39ed8a0e"} Feb 01 08:16:10 crc kubenswrapper[4650]: I0201 08:16:10.790201 4650 generic.go:334] "Generic (PLEG): container finished" podID="92f9bbf3-e779-40be-81d3-7058073fd3d2" containerID="e34ab3d24c60f351d4cc2e8845685de8c14922aec53a735bf818dfbd39ed8a0e" exitCode=0 Feb 01 08:16:10 crc kubenswrapper[4650]: I0201 08:16:10.790301 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vgmm" event={"ID":"92f9bbf3-e779-40be-81d3-7058073fd3d2","Type":"ContainerDied","Data":"e34ab3d24c60f351d4cc2e8845685de8c14922aec53a735bf818dfbd39ed8a0e"} Feb 01 08:16:10 crc kubenswrapper[4650]: I0201 08:16:10.965797 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:16:10 crc kubenswrapper[4650]: E0201 08:16:10.966214 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:16:11 crc kubenswrapper[4650]: I0201 08:16:11.801282 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vgmm" event={"ID":"92f9bbf3-e779-40be-81d3-7058073fd3d2","Type":"ContainerStarted","Data":"073416a6b6e3a284e641fa19c810fd80ff8e7190035900e6d858dc6ecb81b72a"} Feb 01 08:16:11 crc kubenswrapper[4650]: I0201 08:16:11.825211 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4vgmm" podStartSLOduration=3.423085812 podStartE2EDuration="6.825190488s" podCreationTimestamp="2026-02-01 08:16:05 +0000 UTC" firstStartedPulling="2026-02-01 08:16:07.764741229 +0000 UTC m=+3166.487839474" lastFinishedPulling="2026-02-01 08:16:11.166845905 +0000 UTC m=+3169.889944150" observedRunningTime="2026-02-01 08:16:11.819205189 +0000 UTC m=+3170.542303454" watchObservedRunningTime="2026-02-01 08:16:11.825190488 +0000 UTC m=+3170.548288733" Feb 01 08:16:12 crc kubenswrapper[4650]: I0201 08:16:12.540886 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:12 crc kubenswrapper[4650]: I0201 08:16:12.541263 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:12 crc kubenswrapper[4650]: I0201 08:16:12.607757 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:12 crc kubenswrapper[4650]: I0201 08:16:12.860963 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:13 crc kubenswrapper[4650]: I0201 08:16:13.967207 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:16:13 crc kubenswrapper[4650]: I0201 08:16:13.967302 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:16:13 crc kubenswrapper[4650]: I0201 08:16:13.967333 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:16:13 crc kubenswrapper[4650]: I0201 08:16:13.967402 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:16:13 crc kubenswrapper[4650]: I0201 08:16:13.967411 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:16:13 crc kubenswrapper[4650]: E0201 08:16:13.967880 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:16:14 crc kubenswrapper[4650]: I0201 08:16:14.331974 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xccvg"] Feb 01 08:16:14 crc kubenswrapper[4650]: I0201 08:16:14.829451 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xccvg" podUID="7fa7677f-e39c-4496-9e1f-67f096256b84" containerName="registry-server" containerID="cri-o://9ce20c36d03162fa8da925e60715f9faa1e4571cf8e059e1871a3f9ca64dc80e" gracePeriod=2 Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.788427 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.839974 4650 generic.go:334] "Generic (PLEG): container finished" podID="7fa7677f-e39c-4496-9e1f-67f096256b84" containerID="9ce20c36d03162fa8da925e60715f9faa1e4571cf8e059e1871a3f9ca64dc80e" exitCode=0 Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.840053 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xccvg" event={"ID":"7fa7677f-e39c-4496-9e1f-67f096256b84","Type":"ContainerDied","Data":"9ce20c36d03162fa8da925e60715f9faa1e4571cf8e059e1871a3f9ca64dc80e"} Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.840130 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xccvg" event={"ID":"7fa7677f-e39c-4496-9e1f-67f096256b84","Type":"ContainerDied","Data":"c0bdfedca84a8da292835337e894aa1e7f70932660f9468e5a4c5a1028c5adcb"} Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.840149 4650 scope.go:117] "RemoveContainer" containerID="9ce20c36d03162fa8da925e60715f9faa1e4571cf8e059e1871a3f9ca64dc80e" Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.840150 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xccvg" Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.865431 4650 scope.go:117] "RemoveContainer" containerID="9ba954de90e00c3ab68d91ce0c8fe3cd207e8bc07fae52fd5edf9cf0f3f651b0" Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.886203 4650 scope.go:117] "RemoveContainer" containerID="410ba27949922f936da5802862b55c0ea8f37c99062b33f4b8c5691bc3fa1249" Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.927681 4650 scope.go:117] "RemoveContainer" containerID="9ce20c36d03162fa8da925e60715f9faa1e4571cf8e059e1871a3f9ca64dc80e" Feb 01 08:16:15 crc kubenswrapper[4650]: E0201 08:16:15.928202 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ce20c36d03162fa8da925e60715f9faa1e4571cf8e059e1871a3f9ca64dc80e\": container with ID starting with 9ce20c36d03162fa8da925e60715f9faa1e4571cf8e059e1871a3f9ca64dc80e not found: ID does not exist" containerID="9ce20c36d03162fa8da925e60715f9faa1e4571cf8e059e1871a3f9ca64dc80e" Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.928233 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ce20c36d03162fa8da925e60715f9faa1e4571cf8e059e1871a3f9ca64dc80e"} err="failed to get container status \"9ce20c36d03162fa8da925e60715f9faa1e4571cf8e059e1871a3f9ca64dc80e\": rpc error: code = NotFound desc = could not find container \"9ce20c36d03162fa8da925e60715f9faa1e4571cf8e059e1871a3f9ca64dc80e\": container with ID starting with 9ce20c36d03162fa8da925e60715f9faa1e4571cf8e059e1871a3f9ca64dc80e not found: ID does not exist" Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.928252 4650 scope.go:117] "RemoveContainer" containerID="9ba954de90e00c3ab68d91ce0c8fe3cd207e8bc07fae52fd5edf9cf0f3f651b0" Feb 01 08:16:15 crc kubenswrapper[4650]: E0201 08:16:15.928510 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ba954de90e00c3ab68d91ce0c8fe3cd207e8bc07fae52fd5edf9cf0f3f651b0\": container with ID starting with 9ba954de90e00c3ab68d91ce0c8fe3cd207e8bc07fae52fd5edf9cf0f3f651b0 not found: ID does not exist" 
containerID="9ba954de90e00c3ab68d91ce0c8fe3cd207e8bc07fae52fd5edf9cf0f3f651b0" Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.928533 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ba954de90e00c3ab68d91ce0c8fe3cd207e8bc07fae52fd5edf9cf0f3f651b0"} err="failed to get container status \"9ba954de90e00c3ab68d91ce0c8fe3cd207e8bc07fae52fd5edf9cf0f3f651b0\": rpc error: code = NotFound desc = could not find container \"9ba954de90e00c3ab68d91ce0c8fe3cd207e8bc07fae52fd5edf9cf0f3f651b0\": container with ID starting with 9ba954de90e00c3ab68d91ce0c8fe3cd207e8bc07fae52fd5edf9cf0f3f651b0 not found: ID does not exist" Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.928548 4650 scope.go:117] "RemoveContainer" containerID="410ba27949922f936da5802862b55c0ea8f37c99062b33f4b8c5691bc3fa1249" Feb 01 08:16:15 crc kubenswrapper[4650]: E0201 08:16:15.928747 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"410ba27949922f936da5802862b55c0ea8f37c99062b33f4b8c5691bc3fa1249\": container with ID starting with 410ba27949922f936da5802862b55c0ea8f37c99062b33f4b8c5691bc3fa1249 not found: ID does not exist" containerID="410ba27949922f936da5802862b55c0ea8f37c99062b33f4b8c5691bc3fa1249" Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.928767 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"410ba27949922f936da5802862b55c0ea8f37c99062b33f4b8c5691bc3fa1249"} err="failed to get container status \"410ba27949922f936da5802862b55c0ea8f37c99062b33f4b8c5691bc3fa1249\": rpc error: code = NotFound desc = could not find container \"410ba27949922f936da5802862b55c0ea8f37c99062b33f4b8c5691bc3fa1249\": container with ID starting with 410ba27949922f936da5802862b55c0ea8f37c99062b33f4b8c5691bc3fa1249 not found: ID does not exist" Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.934698 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fa7677f-e39c-4496-9e1f-67f096256b84-catalog-content\") pod \"7fa7677f-e39c-4496-9e1f-67f096256b84\" (UID: \"7fa7677f-e39c-4496-9e1f-67f096256b84\") " Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.934874 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cx84d\" (UniqueName: \"kubernetes.io/projected/7fa7677f-e39c-4496-9e1f-67f096256b84-kube-api-access-cx84d\") pod \"7fa7677f-e39c-4496-9e1f-67f096256b84\" (UID: \"7fa7677f-e39c-4496-9e1f-67f096256b84\") " Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.934954 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fa7677f-e39c-4496-9e1f-67f096256b84-utilities\") pod \"7fa7677f-e39c-4496-9e1f-67f096256b84\" (UID: \"7fa7677f-e39c-4496-9e1f-67f096256b84\") " Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.935896 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fa7677f-e39c-4496-9e1f-67f096256b84-utilities" (OuterVolumeSpecName: "utilities") pod "7fa7677f-e39c-4496-9e1f-67f096256b84" (UID: "7fa7677f-e39c-4496-9e1f-67f096256b84"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:16:15 crc kubenswrapper[4650]: I0201 08:16:15.942205 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fa7677f-e39c-4496-9e1f-67f096256b84-kube-api-access-cx84d" (OuterVolumeSpecName: "kube-api-access-cx84d") pod "7fa7677f-e39c-4496-9e1f-67f096256b84" (UID: "7fa7677f-e39c-4496-9e1f-67f096256b84"). InnerVolumeSpecName "kube-api-access-cx84d". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:16:16 crc kubenswrapper[4650]: I0201 08:16:16.001078 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fa7677f-e39c-4496-9e1f-67f096256b84-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7fa7677f-e39c-4496-9e1f-67f096256b84" (UID: "7fa7677f-e39c-4496-9e1f-67f096256b84"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:16:16 crc kubenswrapper[4650]: I0201 08:16:16.037296 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fa7677f-e39c-4496-9e1f-67f096256b84-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 08:16:16 crc kubenswrapper[4650]: I0201 08:16:16.037329 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cx84d\" (UniqueName: \"kubernetes.io/projected/7fa7677f-e39c-4496-9e1f-67f096256b84-kube-api-access-cx84d\") on node \"crc\" DevicePath \"\"" Feb 01 08:16:16 crc kubenswrapper[4650]: I0201 08:16:16.037339 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fa7677f-e39c-4496-9e1f-67f096256b84-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 08:16:16 crc kubenswrapper[4650]: I0201 08:16:16.183484 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xccvg"] Feb 01 08:16:16 crc kubenswrapper[4650]: I0201 08:16:16.191412 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xccvg"] Feb 01 08:16:16 crc kubenswrapper[4650]: I0201 08:16:16.324292 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:16 crc kubenswrapper[4650]: I0201 08:16:16.324377 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:16 crc kubenswrapper[4650]: I0201 08:16:16.419457 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:16 crc kubenswrapper[4650]: I0201 08:16:16.903728 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:17 crc kubenswrapper[4650]: I0201 08:16:17.977701 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fa7677f-e39c-4496-9e1f-67f096256b84" path="/var/lib/kubelet/pods/7fa7677f-e39c-4496-9e1f-67f096256b84/volumes" Feb 01 08:16:18 crc kubenswrapper[4650]: I0201 08:16:18.743397 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4vgmm"] Feb 01 08:16:18 crc kubenswrapper[4650]: I0201 08:16:18.877147 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4vgmm" podUID="92f9bbf3-e779-40be-81d3-7058073fd3d2" 
containerName="registry-server" containerID="cri-o://073416a6b6e3a284e641fa19c810fd80ff8e7190035900e6d858dc6ecb81b72a" gracePeriod=2 Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.863967 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.892736 4650 generic.go:334] "Generic (PLEG): container finished" podID="92f9bbf3-e779-40be-81d3-7058073fd3d2" containerID="073416a6b6e3a284e641fa19c810fd80ff8e7190035900e6d858dc6ecb81b72a" exitCode=0 Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.893243 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4vgmm" Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.893225 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vgmm" event={"ID":"92f9bbf3-e779-40be-81d3-7058073fd3d2","Type":"ContainerDied","Data":"073416a6b6e3a284e641fa19c810fd80ff8e7190035900e6d858dc6ecb81b72a"} Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.893738 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vgmm" event={"ID":"92f9bbf3-e779-40be-81d3-7058073fd3d2","Type":"ContainerDied","Data":"b186513bc632b4dc571431aa9eb4c12fcf2a4d19a173f2dad3f78f260742a378"} Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.893783 4650 scope.go:117] "RemoveContainer" containerID="073416a6b6e3a284e641fa19c810fd80ff8e7190035900e6d858dc6ecb81b72a" Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.922189 4650 scope.go:117] "RemoveContainer" containerID="e34ab3d24c60f351d4cc2e8845685de8c14922aec53a735bf818dfbd39ed8a0e" Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.946989 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92f9bbf3-e779-40be-81d3-7058073fd3d2-utilities\") pod \"92f9bbf3-e779-40be-81d3-7058073fd3d2\" (UID: \"92f9bbf3-e779-40be-81d3-7058073fd3d2\") " Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.947124 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92f9bbf3-e779-40be-81d3-7058073fd3d2-catalog-content\") pod \"92f9bbf3-e779-40be-81d3-7058073fd3d2\" (UID: \"92f9bbf3-e779-40be-81d3-7058073fd3d2\") " Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.947221 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfmht\" (UniqueName: \"kubernetes.io/projected/92f9bbf3-e779-40be-81d3-7058073fd3d2-kube-api-access-jfmht\") pod \"92f9bbf3-e779-40be-81d3-7058073fd3d2\" (UID: \"92f9bbf3-e779-40be-81d3-7058073fd3d2\") " Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.948151 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92f9bbf3-e779-40be-81d3-7058073fd3d2-utilities" (OuterVolumeSpecName: "utilities") pod "92f9bbf3-e779-40be-81d3-7058073fd3d2" (UID: "92f9bbf3-e779-40be-81d3-7058073fd3d2"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.958947 4650 scope.go:117] "RemoveContainer" containerID="a67f32ada762ecdc62f4d7458eeb73540004355d13c6595b0dbb4abd31100ece" Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.960597 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92f9bbf3-e779-40be-81d3-7058073fd3d2-kube-api-access-jfmht" (OuterVolumeSpecName: "kube-api-access-jfmht") pod "92f9bbf3-e779-40be-81d3-7058073fd3d2" (UID: "92f9bbf3-e779-40be-81d3-7058073fd3d2"). InnerVolumeSpecName "kube-api-access-jfmht". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:16:19 crc kubenswrapper[4650]: I0201 08:16:19.996998 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92f9bbf3-e779-40be-81d3-7058073fd3d2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92f9bbf3-e779-40be-81d3-7058073fd3d2" (UID: "92f9bbf3-e779-40be-81d3-7058073fd3d2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:16:20 crc kubenswrapper[4650]: I0201 08:16:20.015191 4650 scope.go:117] "RemoveContainer" containerID="073416a6b6e3a284e641fa19c810fd80ff8e7190035900e6d858dc6ecb81b72a" Feb 01 08:16:20 crc kubenswrapper[4650]: E0201 08:16:20.015686 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"073416a6b6e3a284e641fa19c810fd80ff8e7190035900e6d858dc6ecb81b72a\": container with ID starting with 073416a6b6e3a284e641fa19c810fd80ff8e7190035900e6d858dc6ecb81b72a not found: ID does not exist" containerID="073416a6b6e3a284e641fa19c810fd80ff8e7190035900e6d858dc6ecb81b72a" Feb 01 08:16:20 crc kubenswrapper[4650]: I0201 08:16:20.015731 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"073416a6b6e3a284e641fa19c810fd80ff8e7190035900e6d858dc6ecb81b72a"} err="failed to get container status \"073416a6b6e3a284e641fa19c810fd80ff8e7190035900e6d858dc6ecb81b72a\": rpc error: code = NotFound desc = could not find container \"073416a6b6e3a284e641fa19c810fd80ff8e7190035900e6d858dc6ecb81b72a\": container with ID starting with 073416a6b6e3a284e641fa19c810fd80ff8e7190035900e6d858dc6ecb81b72a not found: ID does not exist" Feb 01 08:16:20 crc kubenswrapper[4650]: I0201 08:16:20.015760 4650 scope.go:117] "RemoveContainer" containerID="e34ab3d24c60f351d4cc2e8845685de8c14922aec53a735bf818dfbd39ed8a0e" Feb 01 08:16:20 crc kubenswrapper[4650]: E0201 08:16:20.016763 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e34ab3d24c60f351d4cc2e8845685de8c14922aec53a735bf818dfbd39ed8a0e\": container with ID starting with e34ab3d24c60f351d4cc2e8845685de8c14922aec53a735bf818dfbd39ed8a0e not found: ID does not exist" containerID="e34ab3d24c60f351d4cc2e8845685de8c14922aec53a735bf818dfbd39ed8a0e" Feb 01 08:16:20 crc kubenswrapper[4650]: I0201 08:16:20.016790 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e34ab3d24c60f351d4cc2e8845685de8c14922aec53a735bf818dfbd39ed8a0e"} err="failed to get container status \"e34ab3d24c60f351d4cc2e8845685de8c14922aec53a735bf818dfbd39ed8a0e\": rpc error: code = NotFound desc = could not find container \"e34ab3d24c60f351d4cc2e8845685de8c14922aec53a735bf818dfbd39ed8a0e\": container with ID starting with 
e34ab3d24c60f351d4cc2e8845685de8c14922aec53a735bf818dfbd39ed8a0e not found: ID does not exist" Feb 01 08:16:20 crc kubenswrapper[4650]: I0201 08:16:20.016808 4650 scope.go:117] "RemoveContainer" containerID="a67f32ada762ecdc62f4d7458eeb73540004355d13c6595b0dbb4abd31100ece" Feb 01 08:16:20 crc kubenswrapper[4650]: E0201 08:16:20.017713 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a67f32ada762ecdc62f4d7458eeb73540004355d13c6595b0dbb4abd31100ece\": container with ID starting with a67f32ada762ecdc62f4d7458eeb73540004355d13c6595b0dbb4abd31100ece not found: ID does not exist" containerID="a67f32ada762ecdc62f4d7458eeb73540004355d13c6595b0dbb4abd31100ece" Feb 01 08:16:20 crc kubenswrapper[4650]: I0201 08:16:20.017743 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a67f32ada762ecdc62f4d7458eeb73540004355d13c6595b0dbb4abd31100ece"} err="failed to get container status \"a67f32ada762ecdc62f4d7458eeb73540004355d13c6595b0dbb4abd31100ece\": rpc error: code = NotFound desc = could not find container \"a67f32ada762ecdc62f4d7458eeb73540004355d13c6595b0dbb4abd31100ece\": container with ID starting with a67f32ada762ecdc62f4d7458eeb73540004355d13c6595b0dbb4abd31100ece not found: ID does not exist" Feb 01 08:16:20 crc kubenswrapper[4650]: I0201 08:16:20.051065 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92f9bbf3-e779-40be-81d3-7058073fd3d2-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 08:16:20 crc kubenswrapper[4650]: I0201 08:16:20.051129 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfmht\" (UniqueName: \"kubernetes.io/projected/92f9bbf3-e779-40be-81d3-7058073fd3d2-kube-api-access-jfmht\") on node \"crc\" DevicePath \"\"" Feb 01 08:16:20 crc kubenswrapper[4650]: I0201 08:16:20.051142 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92f9bbf3-e779-40be-81d3-7058073fd3d2-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 08:16:20 crc kubenswrapper[4650]: I0201 08:16:20.247227 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4vgmm"] Feb 01 08:16:20 crc kubenswrapper[4650]: I0201 08:16:20.255068 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4vgmm"] Feb 01 08:16:20 crc kubenswrapper[4650]: I0201 08:16:20.967018 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:16:20 crc kubenswrapper[4650]: I0201 08:16:20.967074 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:16:20 crc kubenswrapper[4650]: E0201 08:16:20.967344 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:16:21 crc kubenswrapper[4650]: I0201 08:16:21.976233 
4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92f9bbf3-e779-40be-81d3-7058073fd3d2" path="/var/lib/kubelet/pods/92f9bbf3-e779-40be-81d3-7058073fd3d2/volumes" Feb 01 08:16:24 crc kubenswrapper[4650]: I0201 08:16:24.965840 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:16:24 crc kubenswrapper[4650]: I0201 08:16:24.966721 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:16:24 crc kubenswrapper[4650]: I0201 08:16:24.966846 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:16:24 crc kubenswrapper[4650]: I0201 08:16:24.966891 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:16:24 crc kubenswrapper[4650]: I0201 08:16:24.966992 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:16:24 crc kubenswrapper[4650]: I0201 08:16:24.967006 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:16:24 crc kubenswrapper[4650]: E0201 08:16:24.966883 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xfq9r_openshift-machine-config-operator(8dd1b5da-94bb-4bf2-8fed-958df80a8806)\"" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" Feb 01 08:16:24 crc kubenswrapper[4650]: E0201 08:16:24.967786 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:16:34 crc kubenswrapper[4650]: I0201 08:16:34.966138 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:16:34 crc kubenswrapper[4650]: I0201 08:16:34.966977 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:16:34 crc kubenswrapper[4650]: E0201 08:16:34.967546 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:16:39 crc kubenswrapper[4650]: I0201 08:16:39.985790 4650 scope.go:117] "RemoveContainer" containerID="8383ff03dc2cb9bd40456ae792e18d6c9545d0b24483b54fd44fb1f8217603b5" Feb 01 08:16:39 crc kubenswrapper[4650]: I0201 08:16:39.987548 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:16:39 crc kubenswrapper[4650]: I0201 08:16:39.987648 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:16:39 crc kubenswrapper[4650]: I0201 08:16:39.987703 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:16:39 crc kubenswrapper[4650]: I0201 08:16:39.987792 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:16:39 crc kubenswrapper[4650]: I0201 08:16:39.987803 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:16:39 crc kubenswrapper[4650]: E0201 08:16:39.988595 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:16:41 crc kubenswrapper[4650]: I0201 08:16:41.136211 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" event={"ID":"8dd1b5da-94bb-4bf2-8fed-958df80a8806","Type":"ContainerStarted","Data":"ae6a533e1afb9bc26dc0e96cdfa34d7313d70f7a8360d12f758a8e43a837798b"} Feb 01 08:16:49 crc kubenswrapper[4650]: I0201 08:16:49.965310 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:16:49 crc kubenswrapper[4650]: I0201 08:16:49.965857 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:16:49 crc kubenswrapper[4650]: E0201 08:16:49.966176 
4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:16:54 crc kubenswrapper[4650]: I0201 08:16:54.966006 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:16:54 crc kubenswrapper[4650]: I0201 08:16:54.966609 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:16:54 crc kubenswrapper[4650]: I0201 08:16:54.966640 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:16:54 crc kubenswrapper[4650]: I0201 08:16:54.966702 4650 scope.go:117] "RemoveContainer" containerID="981b1e70dfdd27ff2ba4b9adf9ede94c44b92430fb8ca0aeec849d893af747b1" Feb 01 08:16:54 crc kubenswrapper[4650]: I0201 08:16:54.966711 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:16:55 crc kubenswrapper[4650]: E0201 08:16:55.169692 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:16:55 crc kubenswrapper[4650]: I0201 08:16:55.279214 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"d5997ce1c44a51f4a5e4f15eb0d5009cddcbdd33818933b278081843626541eb"} Feb 01 08:16:55 crc kubenswrapper[4650]: I0201 08:16:55.279980 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:16:55 crc kubenswrapper[4650]: I0201 08:16:55.280044 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:16:55 crc kubenswrapper[4650]: I0201 08:16:55.280084 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:16:55 crc kubenswrapper[4650]: I0201 08:16:55.280146 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 
08:16:55 crc kubenswrapper[4650]: E0201 08:16:55.280525 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:17:00 crc kubenswrapper[4650]: I0201 08:17:00.965640 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:17:00 crc kubenswrapper[4650]: I0201 08:17:00.966177 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:17:00 crc kubenswrapper[4650]: E0201 08:17:00.966485 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:17:07 crc kubenswrapper[4650]: I0201 08:17:07.668305 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:17:07 crc kubenswrapper[4650]: E0201 08:17:07.668540 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 08:17:07 crc kubenswrapper[4650]: E0201 08:17:07.669253 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 08:19:09.669205642 +0000 UTC m=+3348.392303927 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 08:17:08 crc kubenswrapper[4650]: I0201 08:17:08.966994 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:17:08 crc kubenswrapper[4650]: I0201 08:17:08.967150 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:17:08 crc kubenswrapper[4650]: I0201 08:17:08.967196 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:17:08 crc kubenswrapper[4650]: I0201 08:17:08.967319 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:17:08 crc kubenswrapper[4650]: E0201 08:17:08.967910 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:17:15 crc kubenswrapper[4650]: I0201 08:17:15.965789 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:17:15 crc kubenswrapper[4650]: I0201 08:17:15.966262 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:17:16 crc kubenswrapper[4650]: E0201 08:17:16.231624 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:17:16 crc kubenswrapper[4650]: I0201 08:17:16.493969 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"b960b69c7c80aa982a6dc3b9a0cf601a6cde9756bb8c3be067768e32c6cd32ac"} Feb 01 08:17:16 crc kubenswrapper[4650]: I0201 08:17:16.494569 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:17:16 crc kubenswrapper[4650]: E0201 08:17:16.494813 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:17:16 crc kubenswrapper[4650]: I0201 08:17:16.494862 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:17:17 crc kubenswrapper[4650]: I0201 08:17:17.501973 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:17:17 crc kubenswrapper[4650]: E0201 08:17:17.502596 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:17:21 crc kubenswrapper[4650]: I0201 08:17:21.811537 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:17:21 crc kubenswrapper[4650]: I0201 08:17:21.982608 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:17:21 crc kubenswrapper[4650]: I0201 08:17:21.983212 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:17:21 crc kubenswrapper[4650]: I0201 08:17:21.983343 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:17:21 crc kubenswrapper[4650]: I0201 08:17:21.983590 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:17:22 crc kubenswrapper[4650]: I0201 08:17:22.617792 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d"} Feb 01 08:17:22 crc kubenswrapper[4650]: I0201 08:17:22.617993 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerStarted","Data":"0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3"} Feb 01 08:17:22 crc kubenswrapper[4650]: E0201 08:17:22.636708 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:17:23 crc kubenswrapper[4650]: I0201 08:17:23.651460 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" exitCode=1 Feb 01 08:17:23 crc kubenswrapper[4650]: I0201 08:17:23.651755 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" exitCode=1 Feb 01 
08:17:23 crc kubenswrapper[4650]: I0201 08:17:23.651763 4650 generic.go:334] "Generic (PLEG): container finished" podID="78a7b8d6-a107-4698-b85d-77d415755428" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" exitCode=1 Feb 01 08:17:23 crc kubenswrapper[4650]: I0201 08:17:23.651657 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d"} Feb 01 08:17:23 crc kubenswrapper[4650]: I0201 08:17:23.651791 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3"} Feb 01 08:17:23 crc kubenswrapper[4650]: I0201 08:17:23.651802 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"78a7b8d6-a107-4698-b85d-77d415755428","Type":"ContainerDied","Data":"217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935"} Feb 01 08:17:23 crc kubenswrapper[4650]: I0201 08:17:23.651819 4650 scope.go:117] "RemoveContainer" containerID="88f97158a78f875b9403d496d10ac8218aa100d4c1a74a5329b5f45d9a1951ef" Feb 01 08:17:23 crc kubenswrapper[4650]: I0201 08:17:23.652810 4650 scope.go:117] "RemoveContainer" containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" Feb 01 08:17:23 crc kubenswrapper[4650]: I0201 08:17:23.652946 4650 scope.go:117] "RemoveContainer" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" Feb 01 08:17:23 crc kubenswrapper[4650]: I0201 08:17:23.653005 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:17:23 crc kubenswrapper[4650]: I0201 08:17:23.653144 4650 scope.go:117] "RemoveContainer" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" Feb 01 08:17:23 crc kubenswrapper[4650]: E0201 08:17:23.653707 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:17:23 crc kubenswrapper[4650]: I0201 08:17:23.709517 4650 scope.go:117] "RemoveContainer" containerID="8d5844dca53a9cf244fb342ae6a0e60e345eec4f0445158c0ab25a2324b60b60" Feb 01 08:17:23 crc kubenswrapper[4650]: I0201 08:17:23.766387 4650 scope.go:117] "RemoveContainer" containerID="b2a66e5776458963b43c41750f346286ec57a2373fc7efa0fcc41f80cd976076" Feb 01 08:17:24 crc kubenswrapper[4650]: I0201 08:17:24.674335 4650 scope.go:117] "RemoveContainer" 
containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" Feb 01 08:17:24 crc kubenswrapper[4650]: I0201 08:17:24.674491 4650 scope.go:117] "RemoveContainer" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" Feb 01 08:17:24 crc kubenswrapper[4650]: I0201 08:17:24.674539 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:17:24 crc kubenswrapper[4650]: I0201 08:17:24.674668 4650 scope.go:117] "RemoveContainer" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" Feb 01 08:17:24 crc kubenswrapper[4650]: E0201 08:17:24.675970 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:17:24 crc kubenswrapper[4650]: I0201 08:17:24.805688 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:17:24 crc kubenswrapper[4650]: I0201 08:17:24.807332 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:17:24 crc kubenswrapper[4650]: E0201 08:17:24.871932 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 08:17:25 crc kubenswrapper[4650]: I0201 08:17:25.680525 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:17:27 crc kubenswrapper[4650]: I0201 08:17:27.974242 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:17:27 crc kubenswrapper[4650]: I0201 08:17:27.974530 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:17:27 crc kubenswrapper[4650]: I0201 08:17:27.975182 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"b960b69c7c80aa982a6dc3b9a0cf601a6cde9756bb8c3be067768e32c6cd32ac"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 08:17:27 crc kubenswrapper[4650]: I0201 08:17:27.975198 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:17:27 crc kubenswrapper[4650]: I0201 08:17:27.975218 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://b960b69c7c80aa982a6dc3b9a0cf601a6cde9756bb8c3be067768e32c6cd32ac" gracePeriod=30 Feb 01 08:17:27 crc kubenswrapper[4650]: I0201 08:17:27.977997 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:17:28 crc kubenswrapper[4650]: I0201 08:17:28.709044 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="b960b69c7c80aa982a6dc3b9a0cf601a6cde9756bb8c3be067768e32c6cd32ac" exitCode=0 Feb 01 08:17:28 crc kubenswrapper[4650]: I0201 08:17:28.709087 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"b960b69c7c80aa982a6dc3b9a0cf601a6cde9756bb8c3be067768e32c6cd32ac"} Feb 01 08:17:28 crc kubenswrapper[4650]: I0201 08:17:28.709119 4650 scope.go:117] "RemoveContainer" containerID="40f3783df1e2cf44eebea1a429b3222bdd1eca9048076ecc287426431585f74c" Feb 01 08:17:28 crc kubenswrapper[4650]: E0201 08:17:28.844085 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:17:29 crc kubenswrapper[4650]: I0201 08:17:29.720817 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490"} Feb 01 08:17:29 crc kubenswrapper[4650]: I0201 08:17:29.721127 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:17:29 crc kubenswrapper[4650]: I0201 08:17:29.721720 4650 scope.go:117] 
"RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:17:29 crc kubenswrapper[4650]: E0201 08:17:29.721974 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:17:30 crc kubenswrapper[4650]: I0201 08:17:30.728374 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:17:30 crc kubenswrapper[4650]: E0201 08:17:30.728869 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:17:33 crc kubenswrapper[4650]: I0201 08:17:33.810513 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:17:34 crc kubenswrapper[4650]: I0201 08:17:34.807902 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:17:36 crc kubenswrapper[4650]: I0201 08:17:36.806166 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:17:37 crc kubenswrapper[4650]: I0201 08:17:37.966454 4650 scope.go:117] "RemoveContainer" containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" Feb 01 08:17:37 crc kubenswrapper[4650]: I0201 08:17:37.966991 4650 scope.go:117] "RemoveContainer" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" Feb 01 08:17:37 crc kubenswrapper[4650]: I0201 08:17:37.967062 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:17:37 crc kubenswrapper[4650]: I0201 08:17:37.967187 4650 scope.go:117] "RemoveContainer" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" Feb 01 08:17:37 crc kubenswrapper[4650]: E0201 08:17:37.967792 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to 
\"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:17:39 crc kubenswrapper[4650]: I0201 08:17:39.806064 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:17:39 crc kubenswrapper[4650]: I0201 08:17:39.808952 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:17:39 crc kubenswrapper[4650]: I0201 08:17:39.809082 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:17:39 crc kubenswrapper[4650]: I0201 08:17:39.810177 4650 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490"} pod="openstack/swift-proxy-599d7597b9-mh6hj" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Feb 01 08:17:39 crc kubenswrapper[4650]: I0201 08:17:39.810238 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:17:39 crc kubenswrapper[4650]: I0201 08:17:39.810278 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" containerID="cri-o://e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" gracePeriod=30 Feb 01 08:17:39 crc kubenswrapper[4650]: I0201 08:17:39.820939 4650 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 01 08:17:39 crc kubenswrapper[4650]: E0201 08:17:39.938713 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:17:40 crc kubenswrapper[4650]: I0201 08:17:40.809871 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" exitCode=0 Feb 01 08:17:40 crc kubenswrapper[4650]: I0201 08:17:40.809920 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490"} Feb 01 08:17:40 crc 
kubenswrapper[4650]: I0201 08:17:40.809962 4650 scope.go:117] "RemoveContainer" containerID="b960b69c7c80aa982a6dc3b9a0cf601a6cde9756bb8c3be067768e32c6cd32ac" Feb 01 08:17:40 crc kubenswrapper[4650]: I0201 08:17:40.810723 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:17:40 crc kubenswrapper[4650]: I0201 08:17:40.810761 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:17:40 crc kubenswrapper[4650]: E0201 08:17:40.811166 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:17:52 crc kubenswrapper[4650]: I0201 08:17:52.965507 4650 scope.go:117] "RemoveContainer" containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" Feb 01 08:17:52 crc kubenswrapper[4650]: I0201 08:17:52.966142 4650 scope.go:117] "RemoveContainer" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" Feb 01 08:17:52 crc kubenswrapper[4650]: I0201 08:17:52.966172 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:17:52 crc kubenswrapper[4650]: I0201 08:17:52.966280 4650 scope.go:117] "RemoveContainer" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" Feb 01 08:17:52 crc kubenswrapper[4650]: E0201 08:17:52.966723 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:17:53 crc kubenswrapper[4650]: I0201 08:17:53.966748 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:17:53 crc kubenswrapper[4650]: I0201 08:17:53.966781 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:17:53 crc kubenswrapper[4650]: E0201 08:17:53.967092 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd 
pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:18:06 crc kubenswrapper[4650]: I0201 08:18:06.965827 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:18:06 crc kubenswrapper[4650]: I0201 08:18:06.966404 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:18:06 crc kubenswrapper[4650]: E0201 08:18:06.966674 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:18:07 crc kubenswrapper[4650]: I0201 08:18:07.966095 4650 scope.go:117] "RemoveContainer" containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" Feb 01 08:18:07 crc kubenswrapper[4650]: I0201 08:18:07.966716 4650 scope.go:117] "RemoveContainer" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" Feb 01 08:18:07 crc kubenswrapper[4650]: I0201 08:18:07.966812 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:18:07 crc kubenswrapper[4650]: I0201 08:18:07.966956 4650 scope.go:117] "RemoveContainer" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" Feb 01 08:18:07 crc kubenswrapper[4650]: E0201 08:18:07.967700 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:18:17 crc kubenswrapper[4650]: I0201 08:18:17.784311 4650 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lp69c"] Feb 01 08:18:17 crc kubenswrapper[4650]: E0201 08:18:17.785968 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92f9bbf3-e779-40be-81d3-7058073fd3d2" containerName="extract-utilities" Feb 01 08:18:17 crc 
kubenswrapper[4650]: I0201 08:18:17.786003 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="92f9bbf3-e779-40be-81d3-7058073fd3d2" containerName="extract-utilities" Feb 01 08:18:17 crc kubenswrapper[4650]: E0201 08:18:17.786095 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92f9bbf3-e779-40be-81d3-7058073fd3d2" containerName="registry-server" Feb 01 08:18:17 crc kubenswrapper[4650]: I0201 08:18:17.786117 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="92f9bbf3-e779-40be-81d3-7058073fd3d2" containerName="registry-server" Feb 01 08:18:17 crc kubenswrapper[4650]: E0201 08:18:17.786169 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fa7677f-e39c-4496-9e1f-67f096256b84" containerName="extract-content" Feb 01 08:18:17 crc kubenswrapper[4650]: I0201 08:18:17.786186 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fa7677f-e39c-4496-9e1f-67f096256b84" containerName="extract-content" Feb 01 08:18:17 crc kubenswrapper[4650]: E0201 08:18:17.786223 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fa7677f-e39c-4496-9e1f-67f096256b84" containerName="extract-utilities" Feb 01 08:18:17 crc kubenswrapper[4650]: I0201 08:18:17.786242 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fa7677f-e39c-4496-9e1f-67f096256b84" containerName="extract-utilities" Feb 01 08:18:17 crc kubenswrapper[4650]: E0201 08:18:17.786277 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92f9bbf3-e779-40be-81d3-7058073fd3d2" containerName="extract-content" Feb 01 08:18:17 crc kubenswrapper[4650]: I0201 08:18:17.786293 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="92f9bbf3-e779-40be-81d3-7058073fd3d2" containerName="extract-content" Feb 01 08:18:17 crc kubenswrapper[4650]: E0201 08:18:17.786318 4650 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fa7677f-e39c-4496-9e1f-67f096256b84" containerName="registry-server" Feb 01 08:18:17 crc kubenswrapper[4650]: I0201 08:18:17.786336 4650 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fa7677f-e39c-4496-9e1f-67f096256b84" containerName="registry-server" Feb 01 08:18:17 crc kubenswrapper[4650]: I0201 08:18:17.786785 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fa7677f-e39c-4496-9e1f-67f096256b84" containerName="registry-server" Feb 01 08:18:17 crc kubenswrapper[4650]: I0201 08:18:17.786825 4650 memory_manager.go:354] "RemoveStaleState removing state" podUID="92f9bbf3-e779-40be-81d3-7058073fd3d2" containerName="registry-server" Feb 01 08:18:17 crc kubenswrapper[4650]: I0201 08:18:17.790217 4650 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:17 crc kubenswrapper[4650]: I0201 08:18:17.795157 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lp69c"] Feb 01 08:18:17 crc kubenswrapper[4650]: I0201 08:18:17.971145 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-catalog-content\") pod \"redhat-marketplace-lp69c\" (UID: \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\") " pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:17 crc kubenswrapper[4650]: I0201 08:18:17.971416 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-utilities\") pod \"redhat-marketplace-lp69c\" (UID: \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\") " pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:17 crc kubenswrapper[4650]: I0201 08:18:17.971610 4650 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pd9lx\" (UniqueName: \"kubernetes.io/projected/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-kube-api-access-pd9lx\") pod \"redhat-marketplace-lp69c\" (UID: \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\") " pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:18 crc kubenswrapper[4650]: I0201 08:18:18.073988 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-utilities\") pod \"redhat-marketplace-lp69c\" (UID: \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\") " pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:18 crc kubenswrapper[4650]: I0201 08:18:18.074203 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pd9lx\" (UniqueName: \"kubernetes.io/projected/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-kube-api-access-pd9lx\") pod \"redhat-marketplace-lp69c\" (UID: \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\") " pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:18 crc kubenswrapper[4650]: I0201 08:18:18.074312 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-catalog-content\") pod \"redhat-marketplace-lp69c\" (UID: \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\") " pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:18 crc kubenswrapper[4650]: I0201 08:18:18.074807 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-catalog-content\") pod \"redhat-marketplace-lp69c\" (UID: \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\") " pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:18 crc kubenswrapper[4650]: I0201 08:18:18.075150 4650 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-utilities\") pod \"redhat-marketplace-lp69c\" (UID: \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\") " pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:18 crc kubenswrapper[4650]: I0201 08:18:18.103654 4650 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-pd9lx\" (UniqueName: \"kubernetes.io/projected/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-kube-api-access-pd9lx\") pod \"redhat-marketplace-lp69c\" (UID: \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\") " pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:18 crc kubenswrapper[4650]: I0201 08:18:18.119790 4650 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:18 crc kubenswrapper[4650]: I0201 08:18:18.642882 4650 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lp69c"] Feb 01 08:18:19 crc kubenswrapper[4650]: I0201 08:18:19.198934 4650 generic.go:334] "Generic (PLEG): container finished" podID="389d3ecd-1fe1-420a-bb00-ac5ca83ddf32" containerID="a0f172014b24cd8601db3b3a5f350ababfe4a3c5e05412ea0dbaf0e4453325fa" exitCode=0 Feb 01 08:18:19 crc kubenswrapper[4650]: I0201 08:18:19.198986 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lp69c" event={"ID":"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32","Type":"ContainerDied","Data":"a0f172014b24cd8601db3b3a5f350ababfe4a3c5e05412ea0dbaf0e4453325fa"} Feb 01 08:18:19 crc kubenswrapper[4650]: I0201 08:18:19.199009 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lp69c" event={"ID":"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32","Type":"ContainerStarted","Data":"b9c10408b18ea81e7513169d78142245f155c52983f9d5dbe9b92a9e2fd56f83"} Feb 01 08:18:20 crc kubenswrapper[4650]: I0201 08:18:20.210125 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lp69c" event={"ID":"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32","Type":"ContainerStarted","Data":"504ad8c9bf4bc4cbfc10377a70853ff5a8530016d0a1a90b23022c0584f29fca"} Feb 01 08:18:20 crc kubenswrapper[4650]: I0201 08:18:20.965825 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:18:20 crc kubenswrapper[4650]: I0201 08:18:20.966149 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:18:20 crc kubenswrapper[4650]: E0201 08:18:20.966414 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:18:21 crc kubenswrapper[4650]: I0201 08:18:21.224298 4650 generic.go:334] "Generic (PLEG): container finished" podID="389d3ecd-1fe1-420a-bb00-ac5ca83ddf32" containerID="504ad8c9bf4bc4cbfc10377a70853ff5a8530016d0a1a90b23022c0584f29fca" exitCode=0 Feb 01 08:18:21 crc kubenswrapper[4650]: I0201 08:18:21.224361 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lp69c" event={"ID":"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32","Type":"ContainerDied","Data":"504ad8c9bf4bc4cbfc10377a70853ff5a8530016d0a1a90b23022c0584f29fca"} Feb 01 08:18:21 crc kubenswrapper[4650]: I0201 08:18:21.979756 4650 scope.go:117] "RemoveContainer" 
containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" Feb 01 08:18:21 crc kubenswrapper[4650]: I0201 08:18:21.980157 4650 scope.go:117] "RemoveContainer" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" Feb 01 08:18:21 crc kubenswrapper[4650]: I0201 08:18:21.980211 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:18:21 crc kubenswrapper[4650]: I0201 08:18:21.980320 4650 scope.go:117] "RemoveContainer" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" Feb 01 08:18:21 crc kubenswrapper[4650]: E0201 08:18:21.980894 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:18:22 crc kubenswrapper[4650]: I0201 08:18:22.234344 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lp69c" event={"ID":"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32","Type":"ContainerStarted","Data":"2641ca6ae1f5558862635039d2a0ec429c53374e3f1bae2014c68170458bafb9"} Feb 01 08:18:22 crc kubenswrapper[4650]: I0201 08:18:22.254720 4650 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lp69c" podStartSLOduration=2.81361213 podStartE2EDuration="5.254701612s" podCreationTimestamp="2026-02-01 08:18:17 +0000 UTC" firstStartedPulling="2026-02-01 08:18:19.2011481 +0000 UTC m=+3297.924246355" lastFinishedPulling="2026-02-01 08:18:21.642237592 +0000 UTC m=+3300.365335837" observedRunningTime="2026-02-01 08:18:22.250825259 +0000 UTC m=+3300.973923504" watchObservedRunningTime="2026-02-01 08:18:22.254701612 +0000 UTC m=+3300.977799867" Feb 01 08:18:28 crc kubenswrapper[4650]: I0201 08:18:28.120926 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:28 crc kubenswrapper[4650]: I0201 08:18:28.121530 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:28 crc kubenswrapper[4650]: I0201 08:18:28.174414 4650 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:28 crc kubenswrapper[4650]: I0201 08:18:28.360189 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:28 crc kubenswrapper[4650]: I0201 08:18:28.449000 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lp69c"] Feb 
01 08:18:30 crc kubenswrapper[4650]: I0201 08:18:30.309966 4650 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lp69c" podUID="389d3ecd-1fe1-420a-bb00-ac5ca83ddf32" containerName="registry-server" containerID="cri-o://2641ca6ae1f5558862635039d2a0ec429c53374e3f1bae2014c68170458bafb9" gracePeriod=2 Feb 01 08:18:30 crc kubenswrapper[4650]: I0201 08:18:30.775432 4650 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:30 crc kubenswrapper[4650]: I0201 08:18:30.829343 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pd9lx\" (UniqueName: \"kubernetes.io/projected/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-kube-api-access-pd9lx\") pod \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\" (UID: \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\") " Feb 01 08:18:30 crc kubenswrapper[4650]: I0201 08:18:30.829393 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-utilities\") pod \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\" (UID: \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\") " Feb 01 08:18:30 crc kubenswrapper[4650]: I0201 08:18:30.829500 4650 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-catalog-content\") pod \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\" (UID: \"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32\") " Feb 01 08:18:30 crc kubenswrapper[4650]: I0201 08:18:30.831880 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-utilities" (OuterVolumeSpecName: "utilities") pod "389d3ecd-1fe1-420a-bb00-ac5ca83ddf32" (UID: "389d3ecd-1fe1-420a-bb00-ac5ca83ddf32"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:18:30 crc kubenswrapper[4650]: I0201 08:18:30.840291 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-kube-api-access-pd9lx" (OuterVolumeSpecName: "kube-api-access-pd9lx") pod "389d3ecd-1fe1-420a-bb00-ac5ca83ddf32" (UID: "389d3ecd-1fe1-420a-bb00-ac5ca83ddf32"). InnerVolumeSpecName "kube-api-access-pd9lx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 01 08:18:30 crc kubenswrapper[4650]: I0201 08:18:30.863034 4650 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "389d3ecd-1fe1-420a-bb00-ac5ca83ddf32" (UID: "389d3ecd-1fe1-420a-bb00-ac5ca83ddf32"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 01 08:18:30 crc kubenswrapper[4650]: I0201 08:18:30.931751 4650 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pd9lx\" (UniqueName: \"kubernetes.io/projected/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-kube-api-access-pd9lx\") on node \"crc\" DevicePath \"\"" Feb 01 08:18:30 crc kubenswrapper[4650]: I0201 08:18:30.931785 4650 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-utilities\") on node \"crc\" DevicePath \"\"" Feb 01 08:18:30 crc kubenswrapper[4650]: I0201 08:18:30.931794 4650 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.321602 4650 generic.go:334] "Generic (PLEG): container finished" podID="389d3ecd-1fe1-420a-bb00-ac5ca83ddf32" containerID="2641ca6ae1f5558862635039d2a0ec429c53374e3f1bae2014c68170458bafb9" exitCode=0 Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.321657 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lp69c" event={"ID":"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32","Type":"ContainerDied","Data":"2641ca6ae1f5558862635039d2a0ec429c53374e3f1bae2014c68170458bafb9"} Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.321692 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lp69c" event={"ID":"389d3ecd-1fe1-420a-bb00-ac5ca83ddf32","Type":"ContainerDied","Data":"b9c10408b18ea81e7513169d78142245f155c52983f9d5dbe9b92a9e2fd56f83"} Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.321712 4650 scope.go:117] "RemoveContainer" containerID="2641ca6ae1f5558862635039d2a0ec429c53374e3f1bae2014c68170458bafb9" Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.321732 4650 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lp69c" Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.348350 4650 scope.go:117] "RemoveContainer" containerID="504ad8c9bf4bc4cbfc10377a70853ff5a8530016d0a1a90b23022c0584f29fca" Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.391776 4650 scope.go:117] "RemoveContainer" containerID="a0f172014b24cd8601db3b3a5f350ababfe4a3c5e05412ea0dbaf0e4453325fa" Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.395308 4650 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lp69c"] Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.409317 4650 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lp69c"] Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.447862 4650 scope.go:117] "RemoveContainer" containerID="2641ca6ae1f5558862635039d2a0ec429c53374e3f1bae2014c68170458bafb9" Feb 01 08:18:31 crc kubenswrapper[4650]: E0201 08:18:31.450542 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2641ca6ae1f5558862635039d2a0ec429c53374e3f1bae2014c68170458bafb9\": container with ID starting with 2641ca6ae1f5558862635039d2a0ec429c53374e3f1bae2014c68170458bafb9 not found: ID does not exist" containerID="2641ca6ae1f5558862635039d2a0ec429c53374e3f1bae2014c68170458bafb9" Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.450611 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2641ca6ae1f5558862635039d2a0ec429c53374e3f1bae2014c68170458bafb9"} err="failed to get container status \"2641ca6ae1f5558862635039d2a0ec429c53374e3f1bae2014c68170458bafb9\": rpc error: code = NotFound desc = could not find container \"2641ca6ae1f5558862635039d2a0ec429c53374e3f1bae2014c68170458bafb9\": container with ID starting with 2641ca6ae1f5558862635039d2a0ec429c53374e3f1bae2014c68170458bafb9 not found: ID does not exist" Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.450657 4650 scope.go:117] "RemoveContainer" containerID="504ad8c9bf4bc4cbfc10377a70853ff5a8530016d0a1a90b23022c0584f29fca" Feb 01 08:18:31 crc kubenswrapper[4650]: E0201 08:18:31.451712 4650 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"504ad8c9bf4bc4cbfc10377a70853ff5a8530016d0a1a90b23022c0584f29fca\": container with ID starting with 504ad8c9bf4bc4cbfc10377a70853ff5a8530016d0a1a90b23022c0584f29fca not found: ID does not exist" containerID="504ad8c9bf4bc4cbfc10377a70853ff5a8530016d0a1a90b23022c0584f29fca" Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.451778 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"504ad8c9bf4bc4cbfc10377a70853ff5a8530016d0a1a90b23022c0584f29fca"} err="failed to get container status \"504ad8c9bf4bc4cbfc10377a70853ff5a8530016d0a1a90b23022c0584f29fca\": rpc error: code = NotFound desc = could not find container \"504ad8c9bf4bc4cbfc10377a70853ff5a8530016d0a1a90b23022c0584f29fca\": container with ID starting with 504ad8c9bf4bc4cbfc10377a70853ff5a8530016d0a1a90b23022c0584f29fca not found: ID does not exist" Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.451805 4650 scope.go:117] "RemoveContainer" containerID="a0f172014b24cd8601db3b3a5f350ababfe4a3c5e05412ea0dbaf0e4453325fa" Feb 01 08:18:31 crc kubenswrapper[4650]: E0201 08:18:31.455605 4650 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a0f172014b24cd8601db3b3a5f350ababfe4a3c5e05412ea0dbaf0e4453325fa\": container with ID starting with a0f172014b24cd8601db3b3a5f350ababfe4a3c5e05412ea0dbaf0e4453325fa not found: ID does not exist" containerID="a0f172014b24cd8601db3b3a5f350ababfe4a3c5e05412ea0dbaf0e4453325fa" Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.455665 4650 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0f172014b24cd8601db3b3a5f350ababfe4a3c5e05412ea0dbaf0e4453325fa"} err="failed to get container status \"a0f172014b24cd8601db3b3a5f350ababfe4a3c5e05412ea0dbaf0e4453325fa\": rpc error: code = NotFound desc = could not find container \"a0f172014b24cd8601db3b3a5f350ababfe4a3c5e05412ea0dbaf0e4453325fa\": container with ID starting with a0f172014b24cd8601db3b3a5f350ababfe4a3c5e05412ea0dbaf0e4453325fa not found: ID does not exist" Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.977760 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.978143 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:18:31 crc kubenswrapper[4650]: I0201 08:18:31.978455 4650 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="389d3ecd-1fe1-420a-bb00-ac5ca83ddf32" path="/var/lib/kubelet/pods/389d3ecd-1fe1-420a-bb00-ac5ca83ddf32/volumes" Feb 01 08:18:31 crc kubenswrapper[4650]: E0201 08:18:31.979393 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:18:34 crc kubenswrapper[4650]: I0201 08:18:34.972481 4650 scope.go:117] "RemoveContainer" containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" Feb 01 08:18:34 crc kubenswrapper[4650]: I0201 08:18:34.974152 4650 scope.go:117] "RemoveContainer" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" Feb 01 08:18:34 crc kubenswrapper[4650]: I0201 08:18:34.974250 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:18:34 crc kubenswrapper[4650]: I0201 08:18:34.974729 4650 scope.go:117] "RemoveContainer" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" Feb 01 08:18:34 crc kubenswrapper[4650]: E0201 08:18:34.979625 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:18:45 crc kubenswrapper[4650]: I0201 08:18:45.967386 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:18:45 crc kubenswrapper[4650]: I0201 08:18:45.968090 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:18:45 crc kubenswrapper[4650]: E0201 08:18:45.970358 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:18:46 crc kubenswrapper[4650]: I0201 08:18:46.966790 4650 scope.go:117] "RemoveContainer" containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" Feb 01 08:18:46 crc kubenswrapper[4650]: I0201 08:18:46.967202 4650 scope.go:117] "RemoveContainer" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" Feb 01 08:18:46 crc kubenswrapper[4650]: I0201 08:18:46.967231 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:18:46 crc kubenswrapper[4650]: I0201 08:18:46.967325 4650 scope.go:117] "RemoveContainer" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" Feb 01 08:18:46 crc kubenswrapper[4650]: E0201 08:18:46.967746 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:18:56 crc kubenswrapper[4650]: I0201 08:18:56.965277 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:18:56 crc kubenswrapper[4650]: I0201 08:18:56.965728 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:18:56 crc 
kubenswrapper[4650]: E0201 08:18:56.965988 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:18:59 crc kubenswrapper[4650]: I0201 08:18:59.966434 4650 scope.go:117] "RemoveContainer" containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" Feb 01 08:18:59 crc kubenswrapper[4650]: I0201 08:18:59.968433 4650 scope.go:117] "RemoveContainer" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" Feb 01 08:18:59 crc kubenswrapper[4650]: I0201 08:18:59.968658 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:18:59 crc kubenswrapper[4650]: I0201 08:18:59.968915 4650 scope.go:117] "RemoveContainer" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" Feb 01 08:18:59 crc kubenswrapper[4650]: E0201 08:18:59.969710 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:19:07 crc kubenswrapper[4650]: I0201 08:19:07.161393 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 08:19:07 crc kubenswrapper[4650]: I0201 08:19:07.162196 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 08:19:09 crc kubenswrapper[4650]: I0201 08:19:09.734917 4650 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices\") pod \"swift-ring-rebalance-lr89m\" (UID: \"c5a1d51a-35a2-49a9-b337-679c75ddea99\") " pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:19:09 crc 
kubenswrapper[4650]: E0201 08:19:09.735132 4650 configmap.go:193] Couldn't get configMap openstack/swift-ring-config-data: configmap "swift-ring-config-data" not found Feb 01 08:19:09 crc kubenswrapper[4650]: E0201 08:19:09.735582 4650 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices podName:c5a1d51a-35a2-49a9-b337-679c75ddea99 nodeName:}" failed. No retries permitted until 2026-02-01 08:21:11.735544829 +0000 UTC m=+3470.458643114 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/c5a1d51a-35a2-49a9-b337-679c75ddea99-ring-data-devices") pod "swift-ring-rebalance-lr89m" (UID: "c5a1d51a-35a2-49a9-b337-679c75ddea99") : configmap "swift-ring-config-data" not found Feb 01 08:19:09 crc kubenswrapper[4650]: I0201 08:19:09.965458 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:19:09 crc kubenswrapper[4650]: I0201 08:19:09.965504 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:19:09 crc kubenswrapper[4650]: E0201 08:19:09.965978 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:19:14 crc kubenswrapper[4650]: I0201 08:19:14.967414 4650 scope.go:117] "RemoveContainer" containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" Feb 01 08:19:14 crc kubenswrapper[4650]: I0201 08:19:14.968174 4650 scope.go:117] "RemoveContainer" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" Feb 01 08:19:14 crc kubenswrapper[4650]: I0201 08:19:14.968222 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:19:14 crc kubenswrapper[4650]: I0201 08:19:14.968399 4650 scope.go:117] "RemoveContainer" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" Feb 01 08:19:14 crc kubenswrapper[4650]: E0201 08:19:14.968994 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" 
pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:19:24 crc kubenswrapper[4650]: I0201 08:19:24.966264 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:19:24 crc kubenswrapper[4650]: I0201 08:19:24.967160 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:19:25 crc kubenswrapper[4650]: E0201 08:19:25.218971 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:19:25 crc kubenswrapper[4650]: I0201 08:19:25.858901 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerStarted","Data":"e3900a9dae4108eeb8796502c3cef4d00277befcdd5bfacd73b326997bad355c"} Feb 01 08:19:25 crc kubenswrapper[4650]: I0201 08:19:25.859453 4650 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:19:25 crc kubenswrapper[4650]: I0201 08:19:25.859696 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:19:25 crc kubenswrapper[4650]: E0201 08:19:25.859962 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:19:26 crc kubenswrapper[4650]: I0201 08:19:26.875835 4650 generic.go:334] "Generic (PLEG): container finished" podID="39a11122-6fd9-463b-8194-c098d9e764ec" containerID="e3900a9dae4108eeb8796502c3cef4d00277befcdd5bfacd73b326997bad355c" exitCode=1 Feb 01 08:19:26 crc kubenswrapper[4650]: I0201 08:19:26.876141 4650 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599d7597b9-mh6hj" event={"ID":"39a11122-6fd9-463b-8194-c098d9e764ec","Type":"ContainerDied","Data":"e3900a9dae4108eeb8796502c3cef4d00277befcdd5bfacd73b326997bad355c"} Feb 01 08:19:26 crc kubenswrapper[4650]: I0201 08:19:26.876370 4650 scope.go:117] "RemoveContainer" containerID="25ab4d29e4d92e60bc2356d2dd4d3b9dba0c96ee813994c24c5be702eb6c7105" Feb 01 08:19:26 crc kubenswrapper[4650]: I0201 08:19:26.877856 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:19:26 crc kubenswrapper[4650]: I0201 08:19:26.877892 4650 scope.go:117] "RemoveContainer" containerID="e3900a9dae4108eeb8796502c3cef4d00277befcdd5bfacd73b326997bad355c" Feb 01 08:19:26 crc kubenswrapper[4650]: E0201 08:19:26.878448 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server 
pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:19:26 crc kubenswrapper[4650]: I0201 08:19:26.965883 4650 scope.go:117] "RemoveContainer" containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" Feb 01 08:19:26 crc kubenswrapper[4650]: I0201 08:19:26.966006 4650 scope.go:117] "RemoveContainer" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" Feb 01 08:19:26 crc kubenswrapper[4650]: I0201 08:19:26.966085 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:19:26 crc kubenswrapper[4650]: I0201 08:19:26.966202 4650 scope.go:117] "RemoveContainer" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" Feb 01 08:19:26 crc kubenswrapper[4650]: E0201 08:19:26.966647 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:19:27 crc kubenswrapper[4650]: I0201 08:19:27.800176 4650 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/swift-proxy-599d7597b9-mh6hj" Feb 01 08:19:27 crc kubenswrapper[4650]: I0201 08:19:27.888751 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:19:27 crc kubenswrapper[4650]: I0201 08:19:27.888788 4650 scope.go:117] "RemoveContainer" containerID="e3900a9dae4108eeb8796502c3cef4d00277befcdd5bfacd73b326997bad355c" Feb 01 08:19:27 crc kubenswrapper[4650]: E0201 08:19:27.889151 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:19:28 crc kubenswrapper[4650]: E0201 08:19:28.682303 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-ring-rebalance-lr89m" podUID="c5a1d51a-35a2-49a9-b337-679c75ddea99" Feb 01 08:19:28 crc kubenswrapper[4650]: I0201 08:19:28.897671 4650 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-lr89m" Feb 01 08:19:28 crc kubenswrapper[4650]: I0201 08:19:28.898614 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:19:28 crc kubenswrapper[4650]: I0201 08:19:28.898649 4650 scope.go:117] "RemoveContainer" containerID="e3900a9dae4108eeb8796502c3cef4d00277befcdd5bfacd73b326997bad355c" Feb 01 08:19:28 crc kubenswrapper[4650]: E0201 08:19:28.899081 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:19:37 crc kubenswrapper[4650]: I0201 08:19:37.161456 4650 patch_prober.go:28] interesting pod/machine-config-daemon-xfq9r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 01 08:19:37 crc kubenswrapper[4650]: I0201 08:19:37.162306 4650 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xfq9r" podUID="8dd1b5da-94bb-4bf2-8fed-958df80a8806" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 01 08:19:38 crc kubenswrapper[4650]: I0201 08:19:38.967286 4650 scope.go:117] "RemoveContainer" containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" Feb 01 08:19:38 crc kubenswrapper[4650]: I0201 08:19:38.967863 4650 scope.go:117] "RemoveContainer" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" Feb 01 08:19:38 crc kubenswrapper[4650]: I0201 08:19:38.967921 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:19:38 crc kubenswrapper[4650]: I0201 08:19:38.968108 4650 scope.go:117] "RemoveContainer" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" Feb 01 08:19:38 crc kubenswrapper[4650]: E0201 08:19:38.969576 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" 
pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:19:39 crc kubenswrapper[4650]: I0201 08:19:39.965739 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:19:39 crc kubenswrapper[4650]: I0201 08:19:39.965773 4650 scope.go:117] "RemoveContainer" containerID="e3900a9dae4108eeb8796502c3cef4d00277befcdd5bfacd73b326997bad355c" Feb 01 08:19:39 crc kubenswrapper[4650]: E0201 08:19:39.966085 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" Feb 01 08:19:53 crc kubenswrapper[4650]: I0201 08:19:53.966480 4650 scope.go:117] "RemoveContainer" containerID="0265500762b941fd40a2695987cd6717bfdcb1446959242352eb48ded7daccc3" Feb 01 08:19:53 crc kubenswrapper[4650]: I0201 08:19:53.967445 4650 scope.go:117] "RemoveContainer" containerID="55efe142d48f9f639b5cf3d0dcd406d6f24329c684314f2f0a9688683939bd5d" Feb 01 08:19:53 crc kubenswrapper[4650]: I0201 08:19:53.967515 4650 scope.go:117] "RemoveContainer" containerID="1a73042593e71a9b7727d8c982b54add289aeb80e3b13b7d7c63b62ac12b6c3b" Feb 01 08:19:53 crc kubenswrapper[4650]: I0201 08:19:53.967881 4650 scope.go:117] "RemoveContainer" containerID="217bc264b17a0a0b06a04a8bc89829f586a73d1a7e204f555e698502d3ae1935" Feb 01 08:19:53 crc kubenswrapper[4650]: E0201 08:19:53.970090 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_openstack(78a7b8d6-a107-4698-b85d-77d415755428)\"]" pod="openstack/swift-storage-0" podUID="78a7b8d6-a107-4698-b85d-77d415755428" Feb 01 08:19:54 crc kubenswrapper[4650]: I0201 08:19:54.965085 4650 scope.go:117] "RemoveContainer" containerID="e6e7088b959fd7a048b78d3e71d3c37a1f9b7506ffc479941e38e4ea688a4490" Feb 01 08:19:54 crc kubenswrapper[4650]: I0201 08:19:54.965118 4650 scope.go:117] "RemoveContainer" containerID="e3900a9dae4108eeb8796502c3cef4d00277befcdd5bfacd73b326997bad355c" Feb 01 08:19:54 crc kubenswrapper[4650]: E0201 08:19:54.965427 4650 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\", failed to 
\"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-599d7597b9-mh6hj_openstack(39a11122-6fd9-463b-8194-c098d9e764ec)\"]" pod="openstack/swift-proxy-599d7597b9-mh6hj" podUID="39a11122-6fd9-463b-8194-c098d9e764ec" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515137606270024454 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015137606271017372 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015137577203016517 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015137577203015467 5ustar corecore